Tizen_4.0 base
[platform/upstream/docker-engine.git] / hack / release.sh
1 #!/usr/bin/env bash
2 set -e
3
4 # This script looks for bundles built by make.sh, and releases them on a
5 # public S3 bucket.
6 #
7 # Bundles should be available for the VERSION string passed as argument.
8 #
9 # The correct way to call this script is inside a container built by the
10 # official Dockerfile at the root of the Docker source code. The Dockerfile,
11 # make.sh and release.sh should all be from the same source code revision.
12
13 set -o pipefail
14
15 # Print a usage message and exit.
16 usage() {
17         cat >&2 <<'EOF'
18 To run, I need:
19 - to be in a container generated by the Dockerfile at the top of the Docker
20   repository;
21 - to be provided with the location of an S3 bucket and path, in
22   environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
23 - to be provided with AWS credentials for this S3 bucket, in environment
24   variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
25 - a generous amount of good will and nice manners.
26 The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
27
28 docker run -e AWS_S3_BUCKET=test.docker.com \
29            -e AWS_ACCESS_KEY_ID     \
30            -e AWS_SECRET_ACCESS_KEY \
31            -e AWS_DEFAULT_REGION    \
32            -it --privileged         \
33            docker ./hack/release.sh
34 EOF
35         exit 1
36 }
37
38 [ "$AWS_S3_BUCKET" ] || usage
39 [ "$AWS_ACCESS_KEY_ID" ] || usage
40 [ "$AWS_SECRET_ACCESS_KEY" ] || usage
41 [ -d /go/src/github.com/docker/docker ] || usage
42 cd /go/src/github.com/docker/docker
43 [ -x hack/make.sh ] || usage
44
45 export AWS_DEFAULT_REGION
46 : ${AWS_DEFAULT_REGION:=us-west-1}
47
48 AWS_CLI=${AWS_CLI:-'aws'}
49
50 RELEASE_BUNDLES=(
51         binary
52         cross
53         tgz
54 )
55
56 if [ "$1" != '--release-regardless-of-test-failure' ]; then
57         RELEASE_BUNDLES=(
58                 test-unit
59                 "${RELEASE_BUNDLES[@]}"
60                 test-integration-cli
61         )
62 fi
63
64 VERSION=$(< VERSION)
65 BUCKET=$AWS_S3_BUCKET
66 BUCKET_PATH=$BUCKET
67 [[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH
68
69 if command -v git &> /dev/null && git rev-parse &> /dev/null; then
70         if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
71                 echo "You cannot run the release script on a repo with uncommitted changes"
72                 usage
73         fi
74 fi
75
76 # These are the 2 keys we've used to sign the deb's
77 #   release (get.docker.com)
78 #       GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
79 #   test    (test.docker.com)
80 #       GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
81
82 setup_s3() {
83         echo "Setting up S3"
84         # Try creating the bucket. Ignore errors (it might already exist).
85         $AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true
86         # Check access to the bucket.
87         $AWS_CLI s3 ls "s3://$BUCKET" >/dev/null
88         # Make the bucket accessible through website endpoints.
89         $AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET"
90 }
91
92 # write_to_s3 uploads the contents of standard input to the specified S3 url.
93 write_to_s3() {
94         DEST=$1
95         F=`mktemp`
96         cat > "$F"
97         $AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
98         rm -f "$F"
99 }
100
101 s3_url() {
102         case "$BUCKET" in
103                 get.docker.com|test.docker.com|experimental.docker.com)
104                         echo "https://$BUCKET_PATH"
105                         ;;
106                 *)
107                         BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
108                         if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
109                                 echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
110                         else
111                                 echo "$BASE_URL"
112                         fi
113                         ;;
114         esac
115 }
116
117 build_all() {
118         echo "Building release"
119         if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
120                 echo >&2
121                 echo >&2 'The build or tests appear to have failed.'
122                 echo >&2
123                 echo >&2 'You, as the release  maintainer, now have a couple options:'
124                 echo >&2 '- delay release and fix issues'
125                 echo >&2 '- delay release and fix issues'
126                 echo >&2 '- did we mention how important this is?  issues need fixing :)'
127                 echo >&2
128                 echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
129                 echo >&2 ' really knows all the hairy problems at hand with the current release'
130                 echo >&2 ' issues) may bypass this checking by running this script again with the'
131                 echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
132                 echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
133                 echo >&2 ' avoid using this if at all possible.'
134                 echo >&2
135                 echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
136                 echo >&2 ' should be used.  If there are release issues, we should always err on the'
137                 echo >&2 ' side of caution.'
138                 echo >&2
139                 exit 1
140         fi
141 }
142
143 upload_release_build() {
144         src="$1"
145         dst="$2"
146         latest="$3"
147
148         echo
149         echo "Uploading $src"
150         echo "  to $dst"
151         echo
152         $AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst"
153         if [ "$latest" ]; then
154                 echo
155                 echo "Copying to $latest"
156                 echo
157                 $AWS_CLI s3 cp --acl public-read "$dst" "$latest"
158         fi
159
160         # get hash files too (see hash_files() in hack/make.sh)
161         for hashAlgo in md5 sha256; do
162                 if [ -e "$src.$hashAlgo" ]; then
163                         echo
164                         echo "Uploading $src.$hashAlgo"
165                         echo "  to $dst.$hashAlgo"
166                         echo
167                         $AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
168                         if [ "$latest" ]; then
169                                 echo
170                                 echo "Copying to $latest.$hashAlgo"
171                                 echo
172                                 $AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
173                         fi
174                 fi
175         done
176 }
177
178 release_build() {
179         echo "Releasing binaries"
180         GOOS=$1
181         GOARCH=$2
182
183         binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
184         tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
185         binary=docker-$VERSION
186         zipExt=".tgz"
187         binaryExt=""
188         tgz=$binary$zipExt
189
190         latestBase=
191         if [ -z "$NOLATEST" ]; then
192                 latestBase=docker-latest
193         fi
194
195         # we need to map our GOOS and GOARCH to uname values
196         # see https://en.wikipedia.org/wiki/Uname
197         # ie, GOOS=linux -> "uname -s"=Linux
198
199         s3Os=$GOOS
200         case "$s3Os" in
201                 darwin)
202                         s3Os=Darwin
203                         ;;
204                 freebsd)
205                         s3Os=FreeBSD
206                         ;;
207                 linux)
208                         s3Os=Linux
209                         ;;
210                 solaris)
211                         echo skipping solaris release
212                         return 0
213                         ;;
214                 windows)
215                         # this is windows use the .zip and .exe extensions for the files.
216                         s3Os=Windows
217                         zipExt=".zip"
218                         binaryExt=".exe"
219                         tgz=$binary$zipExt
220                         binary+=$binaryExt
221                         ;;
222                 *)
223                         echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
224                         exit 1
225                         ;;
226         esac
227
228         s3Arch=$GOARCH
229         case "$s3Arch" in
230                 amd64)
231                         s3Arch=x86_64
232                         ;;
233                 386)
234                         s3Arch=i386
235                         ;;
236                 arm)
237                         s3Arch=armel
238                         # someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
239                         ;;
240                 *)
241                         echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
242                         exit 1
243                         ;;
244         esac
245
246         s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
247         # latest=
248         latestTgz=
249         if [ "$latestBase" ]; then
250                 # commented out since we aren't uploading binaries right now.
251                 # latest="$s3Dir/$latestBase$binaryExt"
252                 # we don't include the $binaryExt because we don't want docker.exe.zip
253                 latestTgz="$s3Dir/$latestBase$zipExt"
254         fi
255
256         if [ ! -f "$tgzDir/$tgz" ]; then
257                 echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
258                 exit 1
259         fi
260         # disable binary uploads for now. Only providing tgz downloads
261         # upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
262         upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
263 }
264
265 # Upload binaries and tgz files to S3
266 release_binaries() {
267         [ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || {
268                 echo >&2 './hack/make.sh must be run before release_binaries'
269                 exit 1
270         }
271
272         for d in bundles/$VERSION/cross/*/*; do
273                 GOARCH="$(basename "$d")"
274                 GOOS="$(basename "$(dirname "$d")")"
275                 release_build "$GOOS" "$GOARCH"
276         done
277
278         # TODO create redirect from builds/*/i686 to builds/*/i386
279
280         cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
281 # To install, run the following commands as root:
282 curl -fsSLO $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && tar --strip-components=1 -xvzf docker-$VERSION.tgz -C /usr/local/bin
283
284 # Then start docker in daemon mode:
285 /usr/local/bin/dockerd
286 EOF
287
288         # Add redirect at /builds/info for URL-backwards-compatibility
289         rm -rf /tmp/emptyfile && touch /tmp/emptyfile
290         $AWS_CLI s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"
291
292         if [ -z "$NOLATEST" ]; then
293                 echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
294                 echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
295         fi
296 }
297
298 # Upload the index script
299 release_index() {
300         echo "Releasing index"
301         url="$(s3_url)/" hack/make.sh install-script
302         write_to_s3 "s3://$BUCKET_PATH/index" < "bundles/$VERSION/install-script/install.sh"
303 }
304
305 main() {
306         [ "$SKIP_RELEASE_BUILD" = '1' ] || build_all
307         setup_s3
308         release_binaries
309         release_index
310 }
311
312 main
313
314 echo
315 echo
316 echo "Release complete; see $(s3_url)"
317 echo "Use the following text to announce the release:"
318 echo
319 echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
320 echo
321 echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
322 echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
323 echo "Windows 64bit zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
324 echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
325 echo