aboutsummaryrefslogtreecommitdiffhomepage
path: root/tools
diff options
context:
space:
mode:
authorGravatar Sam Guymer <sam@guymer.me>2016-06-08 11:26:46 +0000
committerGravatar Yun Peng <pcloudy@google.com>2016-06-08 11:56:53 +0000
commit6e3e48ee51e8174f89da853d3618baa87d7ae812 (patch)
tree53c1cac6286973dec569e0d55bdb1c40c52ea959 /tools
parent08820c76246b249d52dc35af941ea52c2ffb1320 (diff)
Update 'docker_build' to support 1.10 image format
Docker 1.10 updated the format of images moving layers to just being tarballs referenced by a configuration file. A new manifest.json file aggregates images and handles parent and tagging references. Layers and images are now identified by their sha256 hash. An image configuration file must reference all layers that belong to it by this identifier, including all layers in any parent images. Image configuration is generated the same way but now allows multiple layer sha256 hashes to be provided. The base image configuration is read to find config defaults and the layer identifiers that need to be present. Image creation now requires the layer identifier and file and can accept multiple layers. A manifest with a single entry is created that points at the image configuration, its layers and tags. If a base image is provided its layers are added to the beginning of the layer section and a parent reference to the base image is added. Multiple tags can be provided which are applied when the image is loaded. The joining of partial images now consists of merging their contents minus the manifest which is concatenated together. These changes have been made in a backwards compatible way so versions of docker below 1.10 will still work as before. Fixes #1113 -- Change-Id: I0075decc48d8846ad16431948192db196ad702ee Reviewed-on: https://bazel-review.googlesource.com/3730 MOS_MIGRATED_REVID=124339578
Diffstat (limited to 'tools')
-rw-r--r--tools/build_defs/docker/BUILD43
-rw-r--r--tools/build_defs/docker/README.md27
-rwxr-xr-xtools/build_defs/docker/build_test.sh2
-rwxr-xr-xtools/build_defs/docker/build_test_oci.sh493
-rw-r--r--tools/build_defs/docker/create_image.py164
-rw-r--r--tools/build_defs/docker/create_image_config.py241
-rw-r--r--tools/build_defs/docker/create_image_config_test.py603
-rw-r--r--tools/build_defs/docker/docker.bzl147
-rw-r--r--tools/build_defs/docker/incremental_load.sh.tpl27
-rw-r--r--tools/build_defs/docker/join_layers.py14
-rw-r--r--tools/build_defs/docker/rewrite_json.py29
-rw-r--r--tools/build_defs/docker/testdata/BUILD12
-rwxr-xr-xtools/build_defs/docker/testenv.sh4
-rw-r--r--tools/build_defs/docker/utils.py84
14 files changed, 1770 insertions, 120 deletions
diff --git a/tools/build_defs/docker/BUILD b/tools/build_defs/docker/BUILD
index b774fee2e5..db6a375654 100644
--- a/tools/build_defs/docker/BUILD
+++ b/tools/build_defs/docker/BUILD
@@ -29,6 +29,7 @@ TEST_TARGETS = [
"with_double_env",
"with_label",
"with_double_label",
+ "with_user",
"workdir_with_tar_base",
"link_with_files_base",
]
@@ -61,13 +62,28 @@ sh_test(
],
)
+sh_test(
+ name = "build_test_oci",
+ size = "medium",
+ srcs = [
+ "build_test_oci.sh",
+ ],
+ data = TEST_DATA + ["testenv.sh"],
+ deps = [
+ "//src/test/shell:bashunit",
+ ],
+)
+
# Used by docker_build and friends
py_binary(
name = "rewrite_json",
srcs = ["rewrite_json.py"],
visibility = ["//visibility:public"],
- deps = ["//third_party/py/gflags"],
+ deps = [
+ ":utils",
+ "//third_party/py/gflags",
+ ],
)
py_test(
@@ -79,6 +95,24 @@ py_test(
)
py_binary(
+ name = "create_image_config",
+ srcs = ["create_image_config.py"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":utils",
+ "//third_party/py/gflags",
+ ],
+)
+
+py_test(
+ name = "create_image_config_test",
+ srcs = ["create_image_config_test.py"],
+ deps = [
+ ":create_image_config",
+ ],
+)
+
+py_binary(
name = "sha256",
srcs = ["sha256.py"],
visibility = ["//visibility:public"],
@@ -89,6 +123,7 @@ py_binary(
srcs = ["create_image.py"],
visibility = ["//visibility:public"],
deps = [
+ ":utils",
"//third_party/py/gflags",
"//tools/build_defs/pkg:archive",
],
@@ -99,11 +134,17 @@ py_binary(
srcs = ["join_layers.py"],
visibility = ["//visibility:public"],
deps = [
+ ":utils",
"//third_party/py/gflags",
"//tools/build_defs/pkg:archive",
],
)
+py_library(
+ name = "utils",
+ srcs = ["utils.py"],
+)
+
filegroup(
name = "incremental_load_template",
srcs = ["incremental_load.sh.tpl"],
diff --git a/tools/build_defs/docker/README.md b/tools/build_defs/docker/README.md
index 33cbd5950f..071032da2f 100644
--- a/tools/build_defs/docker/README.md
+++ b/tools/build_defs/docker/README.md
@@ -74,9 +74,10 @@ docker_build(
)
```
-## Metadata
+## Image Configuration
-You can set layer metadata on these same rules by simply adding (supported) arguments to the rule, for instance:
+You can set image configuration on these same rules by simply adding (supported) arguments to the
+rule, for instance:
```python
docker_build(
@@ -92,7 +93,8 @@ Will have a similar effect as the Dockerfile construct:
ENTRYPOINT ["foo", "bar", "baz"]
```
-For the set of supported metadata, and ways to construct layers, see here.
+For the set of supported configuration options see
+[here](https://github.com/opencontainers/image-spec/blob/v0.2.0/serialization.md)
### Using
@@ -132,7 +134,7 @@ bazel run my/image:helloworld gcr.io/my-project/my-awesome-image:v0.9
```
__Nota Bene:__ the `docker images` command will show a really old timestamp
-because `docker_build` remove all timestamps from the build to make it
+because `docker_build` removes all timestamps from the build to make it
reproducible.
## Pulling images and deb files from the internet
@@ -363,11 +365,22 @@ docker_build(name, base, data_path, directory, files, mode, tars, debs, symlinks
</td>
</tr>
<tr>
+ <td><code>user</code></td>
+ <td>
+ <code>String, optional</code>
+ <p><a href="https://docs.docker.com/reference/builder/#user">The user
+ that the image should run as.</a></p>
+ <p>Because building the image never happens inside a docker container,
+ this user does not affect the other actions (e.g.,
+ adding files).</p>
+ </td>
+ </tr>
+ <tr>
<td><code>entrypoint</code></td>
<td>
<code>String or string list, optional</code>
<p><a href="https://docs.docker.com/reference/builder/#entrypoint">List
- of entrypoints to add in the layer.</a></p>
+ of entrypoints to add in the image.</a></p>
</td>
</tr>
<tr>
@@ -375,7 +388,7 @@ docker_build(name, base, data_path, directory, files, mode, tars, debs, symlinks
<td>
<code>String or string list, optional</code>
<p><a href="https://docs.docker.com/reference/builder/#cmd">List
- of commands to execute in the layer.</a></p>
+ of commands to execute in the image.</a></p>
</td>
</tr>
<tr>
@@ -436,7 +449,7 @@ docker_build(name, base, data_path, directory, files, mode, tars, debs, symlinks
<code>String, optional</code>
<p><a href="https://docs.docker.com/reference/builder/#workdir">Initial
working directory when running the docker image.</a></p>
- <p>Because building the image never happen inside a docker container,
+ <p>Because building the image never happens inside a docker container,
this working directory does not affect the other actions (e.g.,
adding files).</p>
</td>
diff --git a/tools/build_defs/docker/build_test.sh b/tools/build_defs/docker/build_test.sh
index 785b39925c..38d3e09210 100755
--- a/tools/build_defs/docker/build_test.sh
+++ b/tools/build_defs/docker/build_test.sh
@@ -170,7 +170,7 @@ function check_layers_aux() {
local listing="$(tar xOf "${test_data}" "./${layer}/layer.tar" | tar tv)"
# Check that all files in the layer, if any, have the magic timestamp
- check_eq "$(echo "${listing}" | grep -Fv "${MAGIC_TIMESTAMP}")" ""
+ check_eq "$(echo "${listing}" | grep -Fv "${MAGIC_TIMESTAMP}" || true)" ""
check_id "${input}" "${layer}" "\"${layer}\""
diff --git a/tools/build_defs/docker/build_test_oci.sh b/tools/build_defs/docker/build_test_oci.sh
new file mode 100755
index 0000000000..fd7e73bd50
--- /dev/null
+++ b/tools/build_defs/docker/build_test_oci.sh
@@ -0,0 +1,493 @@
+#!/bin/bash
+
+# Copyright 2016 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unit tests for docker_build
+
+DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+source ${DIR}/testenv.sh || { echo "testenv.sh not found!" >&2; exit 1; }
+
+readonly PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
+if [ "${PLATFORM}" = "darwin" ]; then
+ readonly MAGIC_TIMESTAMP="$(date -r 0 "+%b %e %Y")"
+else
+ readonly MAGIC_TIMESTAMP="$(date --date=@0 "+%F %R")"
+fi
+
+function EXPECT_CONTAINS() {
+ local complete="${1}"
+ local substring="${2}"
+ local message="${3:-Expected '${substring}' not found in '${complete}'}"
+
+ echo "${complete}" | grep -Fsq -- "${substring}" \
+ || fail "$message"
+}
+
+function check_property() {
+ local property="${1}"
+ local tarball="${2}"
+ local image="${3}"
+ local expected="${4}"
+ local test_data="${TEST_DATA_DIR}/${tarball}.tar"
+
+ local config="$(tar xOf "${test_data}" "./${image}.json")"
+
+ # This would be much more accurate if we had 'jq' everywhere.
+ EXPECT_CONTAINS "${config}" "\"${property}\": ${expected}"
+}
+
+function check_no_property() {
+ local property="${1}"
+ local tarball="${2}"
+ local image="${3}"
+ local test_data="${TEST_DATA_DIR}/${tarball}.tar"
+
+ tar xOf "${test_data}" "./${image}.json" >$TEST_log
+ expect_not_log "\"${property}\":"
+}
+
+function check_entrypoint() {
+ input="$1"
+ shift
+ check_property Entrypoint "${input}" "${@}"
+}
+
+function check_cmd() {
+ input="$1"
+ shift
+ check_property Cmd "${input}" "${@}"
+}
+
+function check_ports() {
+ input="$1"
+ shift
+ check_property ExposedPorts "${input}" "${@}"
+}
+
+function check_volumes() {
+ input="$1"
+ shift
+ check_property Volumes "${input}" "${@}"
+}
+
+function check_env() {
+ input="$1"
+ shift
+ check_property Env "${input}" "${@}"
+}
+
+function check_label() {
+ input="$1"
+ shift
+ check_property Label "${input}" "${@}"
+}
+
+function check_workdir() {
+ input="$1"
+ shift
+ check_property WorkingDir "${input}" "${@}"
+}
+
+function check_user() {
+ input="$1"
+ shift
+ check_property User "${input}" "${@}"
+}
+
+function check_images() {
+ local input="$1"
+ shift 1
+ local expected_images=(${*})
+ local test_data="${TEST_DATA_DIR}/${input}.tar"
+
+ local manifest="$(tar xOf "${test_data}" "./manifest.json")"
+ local manifest_images=(
+ $(echo "${manifest}" | grep -Eo '"Config":[[:space:]]*"[^"]+"' \
+ | sed -r -e 's#"Config":.*?"([0-9a-f]+)\.json"#\1#'))
+
+ local manifest_parents=(
+ $(echo "${manifest}" | grep -Eo '"Parent":[[:space:]]*"[^"]+"' \
+ | sed -r -e 's#"Parent":.*?"sha256:([0-9a-f]+)"#\1#'))
+
+ # Verbose output for testing.
+ echo Expected: "${expected_images[@]}"
+ echo Actual: "${manifest_images[@]}"
+ echo Parents: "${manifest_parents[@]}"
+
+ check_eq "${#expected_images[@]}" "${#manifest_images[@]}"
+
+ local index=0
+ while [ "${index}" -lt "${#expected_images[@]}" ]
+ do
+ # Check that the nth sorted layer matches
+ check_eq "${expected_images[$index]}" "${manifest_images[$index]}"
+
+ index=$((index + 1))
+ done
+
+ # Check that the image contains its predecessor as its parent in the manifest.
+ check_eq "${#manifest_parents[@]}" "$((${#manifest_images[@]} - 1))"
+
+ local index=0
+ while [ "${index}" -lt "${#manifest_parents[@]}" ]
+ do
+ # Check that the nth sorted layer matches
+ check_eq "${manifest_parents[$index]}" "${manifest_images[$index]}"
+
+ index=$((index + 1))
+ done
+}
+
+# The bottom manifest entry must contain all layers in order
+function check_image_manifest_layers() {
+ local input="$1"
+ shift 1
+ local expected_layers=(${*})
+ local test_data="${TEST_DATA_DIR}/${input}.tar"
+
+ local manifest="$(tar xOf "${test_data}" "./manifest.json")"
+ local manifest_layers=(
+ $(echo "${manifest}" | grep -Eo '"Layers":[[:space:]]*\[[^]]+\]' \
+ | grep -Eo '\[.+\]' | tail -n 1 | tr ',' '\n' \
+ | sed -r -e 's#.*"([0-9a-f]+)/layer\.tar".*#\1#'))
+
+ # Verbose output for testing.
+ echo Expected: "${expected_layers[@]}"
+ echo Actual: "${manifest_layers[@]}"
+
+ check_eq "${#expected_layers[@]}" "${#manifest_layers[@]}"
+
+ local index=0
+ while [ "${index}" -lt "${#expected_layers[@]}" ]
+ do
+ # Check that the nth sorted layer matches
+ check_eq "${expected_layers[$index]}" "${manifest_layers[$index]}"
+
+ index=$((index + 1))
+ done
+}
+
+function check_layers_aux() {
+ local input="$1"
+ shift 1
+ local expected_layers=(${*})
+
+ local expected_layers_sorted=(
+ $(for i in ${expected_layers[*]}; do echo $i; done | sort)
+ )
+ local test_data="${TEST_DATA_DIR}/${input}.tar"
+
+ # Verbose output for testing.
+ tar tvf "${test_data}"
+
+ local actual_layers=(
+ $(tar tvf ${test_data} | tr -s ' ' | cut -d' ' -f 4- | sort \
+ | cut -d'/' -f 2 | grep -E '^[0-9a-f]+$' | sort | uniq))
+
+ # Verbose output for testing.
+ echo Expected: "${expected_layers_sorted[@]}"
+ echo Actual: "${actual_layers[@]}"
+
+ check_eq "${#expected_layers[@]}" "${#actual_layers[@]}"
+
+ local index=0
+ while [ "${index}" -lt "${#expected_layers[@]}" ]
+ do
+ # Check that the nth sorted layer matches
+ check_eq "${expected_layers_sorted[$index]}" "${actual_layers[$index]}"
+
+ # Grab the ordered layer and check it.
+ local layer="${expected_layers[$index]}"
+
+ # Verbose output for testing.
+ echo Checking layer: "${layer}"
+
+ local listing="$(tar xOf "${test_data}" "./${layer}/layer.tar" | tar tv)"
+
+ # Check that all files in the layer, if any, have the magic timestamp
+ check_eq "$(echo "${listing}" | grep -Fv "${MAGIC_TIMESTAMP}" || true)" ""
+
+ index=$((index + 1))
+ done
+}
+
+function check_layers() {
+ local input="$1"
+ shift
+ check_layers_aux "$input" "$@"
+ check_image_manifest_layers "$input" "$@"
+}
+
+function test_gen_image() {
+ grep -Fsq "./gen.out" "$TEST_DATA_DIR/gen_image.tar" \
+ || fail "'./gen.out' not found in '$TEST_DATA_DIR/gen_image.tar'"
+}
+
+function test_dummy_repository() {
+ local layer="0279f3ce8b08d10506abcf452393b3e48439f5eca41b836fae59a0d509fbafea"
+ local test_data="${TEST_DATA_DIR}/dummy_repository.tar"
+ check_layers_aux "dummy_repository" "$layer"
+}
+
+function test_files_base() {
+ check_layers "files_base" \
+ "82ca3945f7d07df82f274d7fafe83fd664c2154e5c64c988916ccd5b217bb710"
+}
+
+function test_files_with_files_base() {
+ check_layers "files_with_files_base" \
+ "82ca3945f7d07df82f274d7fafe83fd664c2154e5c64c988916ccd5b217bb710" \
+ "84c0d09919ae8b06cb6b064d8cd5eab63341a46f11ccc7ecbe270ad3e1f52744"
+}
+
+function test_tar_base() {
+ check_layers "tar_base" \
+ "8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277"
+
+ # Check that this layer doesn't have any entrypoint data by looking
+ # for *any* entrypoint.
+ check_no_property "Entrypoint" "tar_base" \
+ "9fec194fd32c03350d6a6e60ee8ed7862471e8817aaa310306d9be6242b05d20"
+}
+
+function test_tar_with_tar_base() {
+ check_layers "tar_with_tar_base" \
+ "8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277" \
+ "1cc81a2aaec2e3727d98d48bf9ba09d3ac96ef48adf5edae861d15dd0191dc40"
+}
+
+function test_directory_with_tar_base() {
+ check_layers "directory_with_tar_base" \
+ "8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277" \
+ "e56ddeb8279698484f50d480f71cb5380223ad0f451766b7b9a9348129d02542"
+}
+
+function test_files_with_tar_base() {
+ check_layers "files_with_tar_base" \
+ "8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277" \
+ "f099727fa58f9b688e77b511b3cc728b86ae0e84d197b9330bd51082ad5589f2"
+}
+
+function test_workdir_with_tar_base() {
+ check_layers "workdir_with_tar_base" \
+ "8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277" \
+ "f24cbe53bd1b78909c6dba0bd47016354f3488b35b85aeee68ecc423062b927e"
+}
+
+function test_tar_with_files_base() {
+ check_layers "tar_with_files_base" \
+ "82ca3945f7d07df82f274d7fafe83fd664c2154e5c64c988916ccd5b217bb710" \
+ "bee1a325e4b51a1dcfd7e447987b4e130590815865ab22e8744878053d525f20"
+}
+
+function test_base_with_entrypoint() {
+ check_layers "base_with_entrypoint" \
+ "4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b"
+
+ check_entrypoint "base_with_entrypoint" \
+ "d59ab78d94f88b906227b8696d3065b91c71a1c6045d5103f3572c1e6fe9a1a9" \
+ '["/bar"]'
+
+ # Check that the base layer has a port exposed.
+ check_ports "base_with_entrypoint" \
+ "d59ab78d94f88b906227b8696d3065b91c71a1c6045d5103f3572c1e6fe9a1a9" \
+ '{"8080/tcp": {}}'
+}
+
+function test_derivative_with_shadowed_cmd() {
+ check_layers "derivative_with_shadowed_cmd" \
+ "4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b" \
+ "e35f57dc6c1e84ae67dcaaf3479a3a3c0f52ac4d194073bd6214e04c05beab42"
+}
+
+function test_derivative_with_cmd() {
+ check_layers "derivative_with_cmd" \
+ "4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b" \
+ "e35f57dc6c1e84ae67dcaaf3479a3a3c0f52ac4d194073bd6214e04c05beab42" \
+ "186289545131e34510006ac79498078dcf41736a5eb9a36920a6b30d3f45bc01"
+
+ check_images "derivative_with_cmd" \
+ "d59ab78d94f88b906227b8696d3065b91c71a1c6045d5103f3572c1e6fe9a1a9" \
+ "a37fcc5dfa513987ecec8a19ebe5d17568a7d6e696771c596b110fcc30a2d8a6" \
+ "d3ea6e7cfc3e182a8ca43081db1e145f1bee8c5da5627639800c76abf61b5165"
+
+ check_entrypoint "derivative_with_cmd" \
+ "d59ab78d94f88b906227b8696d3065b91c71a1c6045d5103f3572c1e6fe9a1a9" \
+ '["/bar"]'
+
+ # Check that the middle image has our shadowed arg.
+ check_cmd "derivative_with_cmd" \
+ "a37fcc5dfa513987ecec8a19ebe5d17568a7d6e696771c596b110fcc30a2d8a6" \
+ '["shadowed-arg"]'
+
+ # Check that our topmost image excludes the shadowed arg.
+ check_cmd "derivative_with_cmd" \
+ "d3ea6e7cfc3e182a8ca43081db1e145f1bee8c5da5627639800c76abf61b5165" \
+ '["arg1", "arg2"]'
+
+ # Check that the topmost layer has the ports exposed by the bottom
+ # layer, and itself.
+ check_ports "derivative_with_cmd" \
+ "d3ea6e7cfc3e182a8ca43081db1e145f1bee8c5da5627639800c76abf61b5165" \
+ '{"80/tcp": {}, "8080/tcp": {}}'
+}
+
+function test_derivative_with_volume() {
+ check_layers "derivative_with_volume" \
+ "125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
+ "08424283ad3a7e020e210bec22b166d7ebba57f7ba2d0713c2fd7bd1e2038f88"
+
+ check_images "derivative_with_volume" \
+ "da0f0e314eb3187877754fd5ee1e487b93c13dbabdba18f35d130324f3c9b76d" \
+ "c872bf3f4c7eb5a01ae7ad6fae4c25e86ff2923bb1fe29be5edcdff1b31ed71a"
+
+ # Check that the topmost layer has the ports exposed by the bottom
+ # layer, and itself.
+ check_volumes "derivative_with_volume" \
+ "da0f0e314eb3187877754fd5ee1e487b93c13dbabdba18f35d130324f3c9b76d" \
+ '{"/logs": {}}'
+
+ check_volumes "derivative_with_volume" \
+ "c872bf3f4c7eb5a01ae7ad6fae4c25e86ff2923bb1fe29be5edcdff1b31ed71a" \
+ '{"/asdf": {}, "/blah": {}, "/logs": {}}'
+}
+
+function test_generated_tarball() {
+ check_layers "generated_tarball" \
+ "54b8328604115255cc76c12a2a51939be65c40bf182ff5a898a5fb57c38f7772"
+}
+
+function test_with_env() {
+ check_layers "with_env" \
+ "125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
+ "42a1bd0f449f61a23b8a7776875ffb6707b34ee99c87d6428a7394f5e55e8624"
+
+ check_env "with_env" \
+ "87c0d91841f92847ec6c183810f720e5926dba0652eb5d52a807366825dd21c7" \
+ '["bar=blah blah blah", "foo=/asdf"]'
+}
+
+function test_with_double_env() {
+ check_layers "with_double_env" \
+ "125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
+ "42a1bd0f449f61a23b8a7776875ffb6707b34ee99c87d6428a7394f5e55e8624" \
+ "576a9fd9c690be04dc7aacbb9dbd1f14816e32dbbcc510f4d42325bbff7163dd"
+
+ # Check both the aggregation and the expansion of embedded variables.
+ check_env "with_double_env" \
+ "273d2a6cfc25001baf9d3f7c68770ec79a1671b8249d153e7611a4f80165ecda" \
+ '["bar=blah blah blah", "baz=/asdf blah blah blah", "foo=/asdf"]'
+}
+
+function test_with_label() {
+ check_layers "with_label" \
+ "125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
+ "eba6abda3d259ab6ed5f4d48b76df72a5193fad894d4ae78fbf0a363d8f9e8fd"
+
+ check_label "with_label" \
+ "83c007425faff33ac421329af9f6444b7250abfc12c28f188b47e97fb715c006" \
+ '["com.example.bar={\"name\": \"blah\"}", "com.example.baz=qux", "com.example.foo={\"name\": \"blah\"}"]'
+}
+
+function test_with_double_label() {
+ check_layers "with_double_label" \
+ "125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
+ "eba6abda3d259ab6ed5f4d48b76df72a5193fad894d4ae78fbf0a363d8f9e8fd" \
+ "bfe88fbb5e24fc5bff138f7a1923d53a2ee1bbc8e54b6f5d9c371d5f48b6b023"
+
+ check_label "with_double_label" \
+ "8cfc89c83adf947cd2c18c11579559f1f48cf375a20364ec79eb14d6580dbf75" \
+ '["com.example.bar={\"name\": \"blah\"}", "com.example.baz=qux", "com.example.foo={\"name\": \"blah\"}", "com.example.qux={\"name\": \"blah-blah\"}"]'
+}
+
+function test_with_user() {
+ check_user "with_user" \
+ "bd6666bdde7d4a837a0685d2861822507119f7f6e565acecbbbe93f1d0cc1974" \
+ "\"nobody\""
+}
+
+function get_layer_listing() {
+ local input=$1
+ local layer=$2
+ local test_data="${TEST_DATA_DIR}/${input}.tar"
+ tar xOf "${test_data}" \
+ "./${layer}/layer.tar" | tar tv | sed -e 's/^.*:00 //'
+}
+
+function test_data_path() {
+ local no_data_path_sha="451d182e5c71840f00ba9726dc0239db73a21b7e89e79c77f677e3f7c5c23d44"
+ local data_path_sha="9a41c9e1709558f7ef06f28f66e9056feafa7e0f83990801e1b27c987278d8e8"
+ local absolute_data_path_sha="f196c42ab4f3eb850d9655b950b824db2c99c01527703ac486a7b48bb2a34f44"
+ local root_data_path_sha="19d7fd26d67bfaeedd6232dcd441f14ee163bc81c56ed565cc20e73311c418b6"
+
+ check_layers_aux "no_data_path_image" "${no_data_path_sha}"
+ check_layers_aux "data_path_image" "${data_path_sha}"
+ check_layers_aux "absolute_data_path_image" "${absolute_data_path_sha}"
+ check_layers_aux "root_data_path_image" "${root_data_path_sha}"
+
+ # Without data_path = "." the file will be inserted as `./test`
+ # (since it is the path in the package) and with data_path = "."
+ # the file will be inserted relatively to the testdata package
+ # (so `./test/test`).
+ check_eq "$(get_layer_listing "no_data_path_image" "${no_data_path_sha}")" \
+ './
+./test'
+ check_eq "$(get_layer_listing "data_path_image" "${data_path_sha}")" \
+ './
+./test/
+./test/test'
+
+ # With an absolute path for data_path, we should strip that prefix
+ # from the files' paths. Since the testdata images are in
+ # //tools/build_defs/docker/testdata and data_path is set to
+ # "/tools/build_defs", we should have `docker` as the top-level
+ # directory.
+ check_eq "$(get_layer_listing "absolute_data_path_image" "${absolute_data_path_sha}")" \
+ './
+./docker/
+./docker/testdata/
+./docker/testdata/test/
+./docker/testdata/test/test'
+
+ # With data_path = "/", we expect the entire path from the repository
+ # root.
+ check_eq "$(get_layer_listing "root_data_path_image" "${root_data_path_sha}")" \
+ "./
+./tools/
+./tools/build_defs/
+./tools/build_defs/docker/
+./tools/build_defs/docker/testdata/
+./tools/build_defs/docker/testdata/test/
+./tools/build_defs/docker/testdata/test/test"
+}
+
+function test_extras_with_deb() {
+ local test_data="${TEST_DATA_DIR}/extras_with_deb.tar"
+ local sha=$(tar xOf ${test_data} ./top)
+
+ # The content of the layer should have no duplicate
+ local layer_listing="$(get_layer_listing "extras_with_deb" "${sha}" | sort)"
+ check_eq "${layer_listing}" \
+"./
+./etc/
+./etc/nsswitch.conf
+./tmp/
+./usr/
+./usr/bin/
+./usr/bin/java -> /path/to/bin/java
+./usr/titi"
+}
+
+run_suite "build_test_oci"
diff --git a/tools/build_defs/docker/create_image.py b/tools/build_defs/docker/create_image.py
index 040f1b907e..5dda217765 100644
--- a/tools/build_defs/docker/create_image.py
+++ b/tools/build_defs/docker/create_image.py
@@ -13,9 +13,12 @@
# limitations under the License.
"""This tool creates a docker image from a layer and the various metadata."""
+import json
+import re
import sys
import tarfile
+from tools.build_defs.docker import utils
from tools.build_defs.pkg import archive
from third_party.py import gflags
@@ -27,24 +30,30 @@ gflags.DEFINE_string(
'The output file, mandatory')
gflags.MarkFlagAsRequired('output')
-gflags.DEFINE_string(
- 'metadata', None,
- 'The JSON metadata file for this image, mandatory.')
-gflags.MarkFlagAsRequired('metadata')
-
-gflags.DEFINE_string(
- 'layer', None,
- 'The tar file for the top layer of this image, mandatory.')
-gflags.MarkFlagAsRequired('layer')
+gflags.DEFINE_multistring(
+ 'layer', [],
+ 'Layer tar files and their identifiers that make up this image')
gflags.DEFINE_string(
'id', None,
'The hex identifier of this image (hexstring or @filename), mandatory.')
gflags.MarkFlagAsRequired('id')
+gflags.DEFINE_string('config', None,
+ 'The JSON configuration file for this image, mandatory.')
+gflags.MarkFlagAsRequired('config')
+
+gflags.DEFINE_string('base', None, 'The base image file for this image.')
+
gflags.DEFINE_string(
- 'base', None,
- 'The base image file for this image.')
+ 'legacy_id', None,
+ 'The legacy hex identifier of this layer (hexstring or @filename).')
+
+gflags.DEFINE_string('metadata', None,
+ 'The legacy JSON metadata file for this layer.')
+
+gflags.DEFINE_string('legacy_base', None,
+ 'The legacy base image file for this image.')
gflags.DEFINE_string(
'repository', None,
@@ -54,49 +63,108 @@ gflags.DEFINE_string(
'name', None,
'The symbolic name of this image.')
+gflags.DEFINE_multistring('tag', None,
+ 'The repository tags to apply to the image')
+
FLAGS = gflags.FLAGS
def _base_name_filter(name):
"""Do not add multiple times 'top' and 'repositories' when merging images."""
- filter_names = ['top', 'repositories']
+ filter_names = ['top', 'repositories', 'manifest.json']
return all([not name.endswith(s) for s in filter_names])
-def create_image(output, identifier,
- base=None, layer=None, metadata=None,
- name=None, repository=None):
+def create_image(output,
+ identifier,
+ layers,
+ config,
+ tags=None,
+ base=None,
+ legacy_base=None,
+ metadata_id=None,
+ metadata=None,
+ name=None,
+ repository=None):
"""Creates a Docker image.
Args:
output: the name of the docker image file to create.
- identifier: the identifier of the top layer for this image.
- base: a base layer (optional) to merge to current layer.
- layer: the layer content (a tar file).
+ identifier: the identifier for this image (sha256 of the metadata).
+ layers: the layer content (a sha256 and a tar file).
+ config: the configuration file for the image.
+ tags: tags that apply to this image.
+ base: a base layer (optional) to build on top of.
+ legacy_base: a base layer (optional) to build on top of.
+ metadata_id: the identifier of the top layer for this image.
metadata: the json metadata file for the top layer.
name: symbolic name for this docker image.
repository: repository name for this docker image.
"""
tar = archive.TarFileWriter(output)
- # Write our id to 'top' as we are now the topmost layer.
- tar.add_file('top', content=identifier)
- # Each layer is encoded as a directory in the larger tarball of the form:
- # {id}\
- # layer.tar
- # VERSION
- # json
- # Create the directory for us to now fill in.
- tar.add_file(identifier + '/', tarfile.DIRTYPE)
- # VERSION generally seems to contain 1.0, not entirely sure
- # what the point of this is.
- tar.add_file(identifier + '/VERSION', content=DATA_FORMAT_VERSION)
- # Add the layer file
- tar.add_file(identifier + '/layer.tar', file_content=layer)
- # Now the json metadata
- tar.add_file(identifier + '/json', file_content=metadata)
- # Merge the base if any
+
+ # add the image config referenced by the Config section in the manifest
+ # the name can be anything but docker uses the format below
+ config_file_name = identifier + '.json'
+ tar.add_file(config_file_name, file_content=config)
+
+ layer_file_names = []
+
+ if metadata_id:
+ # Write our id to 'top' as we are now the topmost layer.
+ tar.add_file('top', content=metadata_id)
+
+ # Each layer is encoded as a directory in the larger tarball of the form:
+ # {id}\
+ # layer.tar
+ # VERSION
+ # json
+ # Create the directory for us to now fill in.
+ tar.add_file(metadata_id + '/', tarfile.DIRTYPE)
+ # VERSION generally seems to contain 1.0, not entirely sure
+ # what the point of this is.
+ tar.add_file(metadata_id + '/VERSION', content=DATA_FORMAT_VERSION)
+ # Add the layer file
+ layer_file_name = metadata_id + '/layer.tar'
+ layer_file_names.append(layer_file_name)
+ tar.add_file(layer_file_name, file_content=layers[0]['layer'])
+ # Now the json metadata
+ tar.add_file(metadata_id + '/json', file_content=metadata)
+
+ # Merge the base if any
+ if legacy_base:
+ tar.add_tar(legacy_base, name_filter=_base_name_filter)
+ else:
+ for layer in layers:
+ # layers can be called anything, so just name them by their sha256
+ layer_file_name = identifier + '/' + layer['name'] + '.tar'
+ layer_file_names.append(layer_file_name)
+ tar.add_file(layer_file_name, file_content=layer['layer'])
+
+ base_layer_file_names = []
+ parent = None
if base:
- tar.add_tar(base, name_filter=_base_name_filter)
+ latest_item = utils.GetLatestManifestFromTar(base)
+ if latest_item:
+ base_layer_file_names = latest_item.get('Layers', [])
+ config_file = latest_item['Config']
+ parent_search = re.search('^(.+)\\.json$', config_file)
+ if parent_search:
+ parent = parent_search.group(1)
+
+ manifest_item = {
+ 'Config': config_file_name,
+ 'Layers': base_layer_file_names + layer_file_names,
+ 'RepoTags': tags or []
+ }
+ if parent:
+ manifest_item['Parent'] = 'sha256:' + parent
+
+ manifest = [manifest_item]
+
+ manifest_content = json.dumps(manifest, sort_keys=True)
+ tar.add_file('manifest.json', content=manifest_content)
+
# In addition to N layers of the form described above, there is
# a single file at the top of the image called repositories.
# This file contains a JSON blob of the form:
@@ -120,17 +188,25 @@ def create_image(output, identifier,
# create_image --output=output_file \
# --id=@identifier \
# [--base=base] \
-# --layer=layer.tar \
+# --layer=@identifier=layer.tar \
# --metadata=metadata.json \
-# --name=myname --repository=repositoryName
+# --name=myname --repository=repositoryName \
+# --tag=repo/image:tag
# See the gflags declaration about the flags argument details.
def main(unused_argv):
- identifier = FLAGS.id
- if identifier.startswith('@'):
- with open(identifier[1:], 'r') as f:
- identifier = f.read()
- create_image(FLAGS.output, identifier, FLAGS.base,
- FLAGS.layer, FLAGS.metadata,
+ identifier = utils.ExtractValue(FLAGS.id)
+ legacy_id = utils.ExtractValue(FLAGS.legacy_id)
+
+ layers = []
+ for kv in FLAGS.layer:
+ (k, v) = kv.split('=', 1)
+ layers.append({
+ 'name': utils.ExtractValue(k),
+ 'layer': v,
+ })
+
+ create_image(FLAGS.output, identifier, layers, FLAGS.config, FLAGS.tag,
+ FLAGS.base, FLAGS.legacy_base, legacy_id, FLAGS.metadata,
FLAGS.name, FLAGS.repository)
if __name__ == '__main__':
diff --git a/tools/build_defs/docker/create_image_config.py b/tools/build_defs/docker/create_image_config.py
new file mode 100644
index 0000000000..05ce4a684b
--- /dev/null
+++ b/tools/build_defs/docker/create_image_config.py
@@ -0,0 +1,241 @@
+# Copyright 2016 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""This package manipulates OCI image configuration metadata."""
+from collections import namedtuple
+import copy
+import json
+import os
+import os.path
+import sys
+
+from tools.build_defs.docker import utils
+from third_party.py import gflags
+
+gflags.DEFINE_string('base', None, 'The parent image')
+
+gflags.DEFINE_string('output', None, 'The output file to generate')
+gflags.MarkFlagAsRequired('output')
+
+gflags.DEFINE_multistring('layer', [],
+ 'Layer sha256 hashes that make up this image')
+
+gflags.DEFINE_list('entrypoint', None,
+ 'Override the "Entrypoint" of the previous image')
+
+gflags.DEFINE_list('command', None, 'Override the "Cmd" of the previous image')
+
+gflags.DEFINE_string('user', None, 'The username to run commands under')
+
+gflags.DEFINE_list('labels', None, 'Augment the "Label" of the previous image')
+
+gflags.DEFINE_list('ports', None,
+ 'Augment the "ExposedPorts" of the previous image')
+
+gflags.DEFINE_list('volumes', None,
+ 'Augment the "Volumes" of the previous image')
+
+gflags.DEFINE_string('workdir', None, 'Set the working directory for the image')
+
+gflags.DEFINE_list('env', None, 'Augment the "Env" of the previous image')
+
+FLAGS = gflags.FLAGS
+
+_ConfigOptionsT = namedtuple('ConfigOptionsT', ['layers', 'entrypoint', 'cmd',
+ 'env', 'labels', 'ports',
+ 'volumes', 'workdir', 'user'])
+
+
+class ConfigOptions(_ConfigOptionsT):
+ """Docker image configuration options."""
+
+ def __new__(cls,
+ layers=None,
+ entrypoint=None,
+ cmd=None,
+ user=None,
+ labels=None,
+ env=None,
+ ports=None,
+ volumes=None,
+ workdir=None):
+ """Constructor."""
+ return super(ConfigOptions, cls).__new__(cls,
+ layers=layers,
+ entrypoint=entrypoint,
+ cmd=cmd,
+ user=user,
+ labels=labels,
+ env=env,
+ ports=ports,
+ volumes=volumes,
+ workdir=workdir)
+
+_PROCESSOR_ARCHITECTURE = 'amd64'
+
+_OPERATING_SYSTEM = 'linux'
+
+
+def Resolve(value, environment):
+ """Resolves environment variables embedded in the given value."""
+ outer_env = os.environ
+ try:
+ os.environ = environment
+ return os.path.expandvars(value)
+ finally:
+ os.environ = outer_env
+
+
+def DeepCopySkipNull(data):
+  """Do a deep copy, skipping null entries."""
+ if isinstance(data, dict):
+ return dict((DeepCopySkipNull(k), DeepCopySkipNull(v))
+ for k, v in data.iteritems() if v is not None)
+ return copy.deepcopy(data)
+
+
+def KeyValueToDict(pair):
+  """Converts an iterable object of key=value pairs to a dictionary."""
+ d = dict()
+ for kv in pair:
+ (k, v) = kv.split('=', 1)
+ d[k] = v
+ return d
+
+
+def CreateImageConfig(data, options):
+ """Create an image config possibly based on an existing one.
+
+ Args:
+ data: A dict of Docker image config to base on top of.
+ options: Options specific to this image which will be merged with any
+ existing data
+
+ Returns:
+ Image config for the new image
+ """
+ defaults = DeepCopySkipNull(data)
+
+  # don't propagate non-spec keys
+ output = dict()
+ output['created'] = '0001-01-01T00:00:00Z'
+ output['author'] = 'Bazel'
+ output['architecture'] = _PROCESSOR_ARCHITECTURE
+ output['os'] = _OPERATING_SYSTEM
+
+ output['config'] = defaults.get('config', {})
+
+ if options.entrypoint:
+ output['config']['Entrypoint'] = options.entrypoint
+ if options.cmd:
+ output['config']['Cmd'] = options.cmd
+ if options.user:
+ output['config']['User'] = options.user
+
+ def Dict2ConfigValue(d):
+ return ['%s=%s' % (k, d[k]) for k in sorted(d.keys())]
+
+ if options.env:
+ # Build a dictionary of existing environment variables (used by Resolve).
+ environ_dict = KeyValueToDict(output['config'].get('Env', []))
+ # Merge in new environment variables, resolving references.
+ for k, v in options.env.iteritems():
+ # Resolve handles scenarios like "PATH=$PATH:...".
+ environ_dict[k] = Resolve(v, environ_dict)
+ output['config']['Env'] = Dict2ConfigValue(environ_dict)
+
+  # TODO(bazel-team) Label is currently docker specific
+ if options.labels:
+ label_dict = KeyValueToDict(output['config'].get('Label', []))
+ for k, v in options.labels.iteritems():
+ label_dict[k] = v
+ output['config']['Label'] = Dict2ConfigValue(label_dict)
+
+ if options.ports:
+ if 'ExposedPorts' not in output['config']:
+ output['config']['ExposedPorts'] = {}
+ for p in options.ports:
+ if '/' in p:
+ # The port spec has the form 80/tcp, 1234/udp
+ # so we simply use it as the key.
+ output['config']['ExposedPorts'][p] = {}
+ else:
+ # Assume tcp
+ output['config']['ExposedPorts'][p + '/tcp'] = {}
+
+ if options.volumes:
+ if 'Volumes' not in output['config']:
+ output['config']['Volumes'] = {}
+ for p in options.volumes:
+ output['config']['Volumes'][p] = {}
+
+ if options.workdir:
+ output['config']['WorkingDir'] = options.workdir
+
+ # diff_ids are ordered from bottom-most to top-most
+ diff_ids = defaults.get('rootfs', {}).get('diff_ids', [])
+ layers = options.layers if options.layers else []
+ diff_ids += ['sha256:%s' % l for l in layers]
+ output['rootfs'] = {
+ 'type': 'layers',
+ 'diff_ids': diff_ids,
+ }
+
+ # history is ordered from bottom-most layer to top-most layer
+ history = defaults.get('history', [])
+ # docker only allows the child to have one more history entry than the parent
+ history += [{
+ 'created': '0001-01-01T00:00:00Z',
+ 'created_by': 'bazel build ...',
+ 'author': 'Bazel'}]
+ output['history'] = history
+
+ return output
+
+
+def main(unused_argv):
+ base_json = '{}'
+ manifest = utils.GetLatestManifestFromTar(FLAGS.base)
+ if manifest:
+ config_file = manifest['Config']
+ base_json = utils.GetTarFile(FLAGS.base, config_file)
+ data = json.loads(base_json)
+
+ layers = []
+ for layer in FLAGS.layer:
+ layers.append(utils.ExtractValue(layer))
+
+ labels = KeyValueToDict(FLAGS.labels)
+ for label, value in labels.iteritems():
+ if value.startswith('@'):
+ with open(value[1:], 'r') as f:
+ labels[label] = f.read()
+
+ output = CreateImageConfig(data,
+ ConfigOptions(layers=layers,
+ entrypoint=FLAGS.entrypoint,
+ cmd=FLAGS.command,
+ user=FLAGS.user,
+ labels=labels,
+ env=KeyValueToDict(FLAGS.env),
+ ports=FLAGS.ports,
+ volumes=FLAGS.volumes,
+ workdir=FLAGS.workdir))
+
+ with open(FLAGS.output, 'w') as fp:
+ json.dump(output, fp, sort_keys=True)
+ fp.write('\n')
+
+
+if __name__ == '__main__':
+ main(FLAGS(sys.argv))
diff --git a/tools/build_defs/docker/create_image_config_test.py b/tools/build_defs/docker/create_image_config_test.py
new file mode 100644
index 0000000000..e01a17c398
--- /dev/null
+++ b/tools/build_defs/docker/create_image_config_test.py
@@ -0,0 +1,603 @@
+# Copyright 2016 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Testing for create_image_config."""
+
+import unittest
+
+from tools.build_defs.docker.create_image_config import _OPERATING_SYSTEM
+from tools.build_defs.docker.create_image_config import _PROCESSOR_ARCHITECTURE
+from tools.build_defs.docker.create_image_config import ConfigOptions
+from tools.build_defs.docker.create_image_config import CreateImageConfig
+
+
+class CreateImageConfigTest(unittest.TestCase):
+ """Testing for create_image_config."""
+
+ base_expected = {
+ 'created': '0001-01-01T00:00:00Z',
+ 'author': 'Bazel',
+ 'architecture': _PROCESSOR_ARCHITECTURE,
+ 'os': _OPERATING_SYSTEM,
+ 'config': {},
+ 'rootfs': {'diff_ids': [],
+ 'type': 'layers'},
+ 'history': [{'author': 'Bazel',
+ 'created': '0001-01-01T00:00:00Z',
+ 'created_by': 'bazel build ...'}],
+ }
+
+ def testNewUser(self):
+ in_data = {'config': {'WorkingDir': '/usr/home/mattmoor'}}
+ user = 'mattmoor'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(user=user))
+ self.assertEquals(expected, actual)
+
+ def testOverrideUser(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ }
+ }
+ user = 'mattmoor2'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor2',
+ 'WorkingDir': '/usr/home/mattmoor',
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(user=user))
+ self.assertEquals(expected, actual)
+
+ def testNewEntrypoint(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor'
+ }
+ }
+ entrypoint = ['/bin/bash']
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': entrypoint
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(entrypoint=entrypoint))
+ self.assertEquals(expected, actual)
+
+ def testOverrideEntrypoint(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': ['/bin/sh', 'does', 'not', 'matter'],
+ }
+ }
+ entrypoint = ['/bin/bash']
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': entrypoint
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(entrypoint=entrypoint))
+ self.assertEquals(expected, actual)
+
+ def testNewCmd(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': ['/bin/bash'],
+ }
+ }
+ cmd = ['/bin/bash']
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': ['/bin/bash'],
+ 'Cmd': cmd
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(cmd=cmd))
+ self.assertEquals(expected, actual)
+
+ def testOverrideCmd(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': ['/bin/bash'],
+ 'Cmd': ['does', 'not', 'matter'],
+ }
+ }
+ cmd = ['does', 'matter']
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': ['/bin/bash'],
+ 'Cmd': cmd
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(cmd=cmd))
+ self.assertEquals(expected, actual)
+
+ def testOverrideBoth(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': ['/bin/sh'],
+ 'Cmd': ['does', 'not', 'matter'],
+ }
+ }
+ entrypoint = ['/bin/bash', '-c']
+ cmd = ['my-command', 'my-arg1', 'my-arg2']
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Entrypoint': entrypoint,
+ 'Cmd': cmd
+ },
+ })
+
+ actual = CreateImageConfig(in_data,
+ ConfigOptions(entrypoint=entrypoint,
+ cmd=cmd))
+ self.assertEquals(expected, actual)
+
+ def testStripContainerConfig(self):
+ in_data = {'container_config': {},}
+ expected = self.base_expected.copy()
+
+ actual = CreateImageConfig(in_data, ConfigOptions())
+ self.assertEquals(expected, actual)
+
+ def testEmptyBase(self):
+ in_data = {}
+ entrypoint = ['/bin/bash', '-c']
+ cmd = ['my-command', 'my-arg1', 'my-arg2']
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'Entrypoint': entrypoint,
+ 'Cmd': cmd,
+ 'ExposedPorts': {
+ '80/tcp': {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data,
+ ConfigOptions(entrypoint=entrypoint,
+ cmd=cmd,
+ ports=['80']))
+ self.assertEquals(expected, actual)
+
+ def testNewPort(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor'
+ }
+ }
+ port = '80'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'ExposedPorts': {
+ port + '/tcp': {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(ports=[port]))
+ self.assertEquals(expected, actual)
+
+ def testAugmentPort(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'ExposedPorts': {
+ '443/tcp': {}
+ }
+ }
+ }
+ port = '80'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'ExposedPorts': {
+ '443/tcp': {},
+ port + '/tcp': {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(ports=[port]))
+ self.assertEquals(expected, actual)
+
+ def testMultiplePorts(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor'
+ }
+ }
+ port1 = '80'
+ port2 = '8080'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'ExposedPorts': {
+ port1 + '/tcp': {},
+ port2 + '/tcp': {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(ports=[port1, port2]))
+ self.assertEquals(expected, actual)
+
+ def testPortCollision(self):
+ port = '80'
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'ExposedPorts': {
+ port + '/tcp': {}
+ }
+ }
+ }
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'ExposedPorts': {
+ port + '/tcp': {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(ports=[port]))
+ self.assertEquals(expected, actual)
+
+ def testPortWithProtocol(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor'
+ }
+ }
+ port = '80/tcp'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'ExposedPorts': {
+ port: {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(ports=[port]))
+ self.assertEquals(expected, actual)
+
+ def testNewVolume(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor'
+ }
+ }
+ volume = '/logs'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Volumes': {
+ volume: {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(volumes=[volume]))
+ self.assertEquals(expected, actual)
+
+ def testAugmentVolume(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Volumes': {
+ '/original': {}
+ }
+ }
+ }
+ volume = '/data'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Volumes': {
+ '/original': {},
+ volume: {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(volumes=[volume]))
+ self.assertEquals(expected, actual)
+
+ def testMultipleVolumes(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor'
+ }
+ }
+ volume1 = '/input'
+ volume2 = '/output'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Volumes': {
+ volume1: {},
+ volume2: {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data,
+ ConfigOptions(volumes=[volume1, volume2]))
+ self.assertEquals(expected, actual)
+
+ def testEnv(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor'
+ }
+ }
+ env = {'baz': 'blah',
+ 'foo': 'bar',}
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Env': [
+ 'baz=blah',
+ 'foo=bar',
+ ],
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(env=env))
+ self.assertEquals(expected, actual)
+
+ def testEnvResolveReplace(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Env': [
+ 'foo=bar',
+ 'baz=blah',
+ 'blah=still around',
+ ],
+ }
+ }
+ env = {'baz': 'replacement',
+ 'foo': '$foo:asdf',}
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Env': [
+ 'baz=replacement',
+ 'blah=still around',
+ 'foo=bar:asdf',
+ ],
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(env=env))
+ self.assertEquals(expected, actual)
+
+ def testLabel(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor'
+ }
+ }
+ labels = {'baz': 'blah',
+ 'foo': 'bar',}
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Label': [
+ 'baz=blah',
+ 'foo=bar',
+ ],
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(labels=labels))
+ self.assertEquals(expected, actual)
+
+ def testAugmentLabel(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Label': [
+ 'baz=blah',
+ 'blah=still around',
+ ],
+ }
+ }
+ labels = {'baz': 'replacement',
+ 'foo': 'bar',}
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Label': [
+ 'baz=replacement',
+ 'blah=still around',
+ 'foo=bar',
+ ],
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(labels=labels))
+ self.assertEquals(expected, actual)
+
+ def testAugmentVolumeWithNullInput(self):
+ in_data = {
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Volumes': None,
+ }
+ }
+ volume = '/data'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'mattmoor',
+ 'WorkingDir': '/usr/home/mattmoor',
+ 'Volumes': {
+ volume: {}
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(volumes=[volume]))
+ self.assertEquals(expected, actual)
+
+ def testSetWorkingDir(self):
+ in_data = {
+ 'config': {
+ 'User': 'bleh',
+ 'WorkingDir': '/home/bleh',
+ 'Volumes': {
+ }
+ }
+ }
+ workdir = '/some/path'
+ expected = self.base_expected.copy()
+ expected.update({
+ 'config': {
+ 'User': 'bleh',
+ 'WorkingDir': '/some/path',
+ 'Volumes': {
+ }
+ },
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(workdir=workdir))
+ self.assertEquals(expected, actual)
+
+ def testLayersAddedToDiffIds(self):
+ initial_diff_ids = [
+ 'sha256:1',
+ 'sha256:2',
+ ]
+ in_data = {
+ 'rootfs': {
+ 'type': 'layers',
+ 'diff_ids': initial_diff_ids,
+ }
+ }
+ layers = ['3', '4']
+ expected = self.base_expected.copy()
+ expected.update({
+ 'rootfs': {
+ 'type': 'layers',
+ 'diff_ids': initial_diff_ids + ['sha256:%s' % l for l in layers],
+ }
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions(layers=layers))
+ self.assertEquals(expected, actual)
+
+ def testHistoryAdded(self):
+ in_data = self.base_expected.copy()
+ expected = self.base_expected.copy()
+ expected.update({
+ 'history': [
+ {
+ 'author': 'Bazel',
+ 'created': '0001-01-01T00:00:00Z',
+ 'created_by': 'bazel build ...'
+ }, {
+ 'author': 'Bazel',
+ 'created': '0001-01-01T00:00:00Z',
+ 'created_by': 'bazel build ...'
+ }
+ ]
+ })
+
+ actual = CreateImageConfig(in_data, ConfigOptions())
+ self.assertEquals(expected, actual)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tools/build_defs/docker/docker.bzl b/tools/build_defs/docker/docker.bzl
index 5fcedd5007..29a6a50328 100644
--- a/tools/build_defs/docker/docker.bzl
+++ b/tools/build_defs/docker/docker.bzl
@@ -137,6 +137,58 @@ def _get_base_artifact(ctx):
def _serialize_dict(dict_value):
return ",".join(["%s=%s" % (k, dict_value[k]) for k in dict_value])
+def _image_config(ctx, layer_names):
+ """Create the configuration for a new docker image."""
+ config = ctx.new_file(ctx.label.name + ".config")
+
+ label_file_dict = dict()
+ for i in range(len(ctx.files.label_files)):
+ fname = ctx.attr.label_file_strings[i]
+ file = ctx.files.label_files[i]
+ label_file_dict[fname] = file
+
+ labels = dict()
+ for l in ctx.attr.labels:
+ fname = ctx.attr.labels[l]
+ if fname[0] == '@':
+ labels[l] = "@" + label_file_dict[fname[1:]].path
+ else:
+ labels[l] = fname
+
+ args = [
+ "--output=%s" % config.path,
+ "--entrypoint=%s" % ",".join(ctx.attr.entrypoint),
+ "--command=%s" % ",".join(ctx.attr.cmd),
+ "--labels=%s" % _serialize_dict(labels),
+ "--env=%s" % _serialize_dict(ctx.attr.env),
+ "--ports=%s" % ",".join(ctx.attr.ports),
+ "--volumes=%s" % ",".join(ctx.attr.volumes)
+ ]
+ if ctx.attr.user:
+ args += ["--user=" + ctx.attr.user]
+ if ctx.attr.workdir:
+ args += ["--workdir=" + ctx.attr.workdir]
+
+ inputs = layer_names
+ args += ["--layer=@" + l.path for l in layer_names]
+
+ if ctx.attr.label_files:
+ inputs += ctx.files.label_files
+
+ base = _get_base_artifact(ctx)
+ if base:
+ args += ["--base=%s" % base.path]
+ inputs += [base]
+
+ ctx.action(
+ executable = ctx.executable.create_image_config,
+ arguments = args,
+ inputs = inputs,
+ outputs = [config],
+ use_default_shell_env=True,
+ mnemonic = "ImageConfig")
+ return config
+
def _metadata_action(ctx, layer, name, output):
"""Generate the action to create the JSON metadata for the layer."""
rewrite_tool = ctx.executable.rewrite_tool
@@ -216,28 +268,42 @@ def _metadata(ctx, layer, name):
_metadata_action(ctx, layer, name, metadata)
return metadata
-def _create_image(ctx, layer, name, metadata):
+def _create_image(ctx, layers, id, config, name, metadata):
"""Create the new image."""
- create_image = ctx.executable.create_image
args = [
"--output=" + ctx.outputs.layer.path,
- "--metadata=" + metadata.path,
- "--layer=" + layer.path,
- "--id=@" + name.path,
+ "--id=@" + id.path,
+ "--config=" + config.path,
]
- inputs = [layer, metadata, name]
+
+ args += ["--layer=@%s=%s" % (l["name"].path, l["layer"].path) for l in layers]
+ inputs = [id, config] + [l["name"] for l in layers] + [l["layer"] for l in layers]
+
+ if name:
+ args += ["--legacy_id=@" + name.path]
+ inputs += [name]
+
+ if metadata:
+ args += ["--metadata=" + metadata.path]
+ inputs += [metadata]
+
# If we have been provided a base image, add it.
if ctx.attr.base and not hasattr(ctx.attr.base, "docker_layers"):
- base = _get_base_artifact(ctx)
- if base:
- args += ["--base=%s" % base.path]
- inputs += [base]
+ legacy_base = _get_base_artifact(ctx)
+ if legacy_base:
+ args += ["--legacy_base=%s" % legacy_base.path]
+ inputs += [legacy_base]
+
+ base = _get_base_artifact(ctx)
+ if base:
+ args += ["--base=%s" % base.path]
+ inputs += [base]
ctx.action(
- executable = create_image,
+ executable = ctx.executable.create_image,
arguments = args,
inputs = inputs,
outputs = [ctx.outputs.layer],
- mnemonic = "CreateLayer",
+ mnemonic = "CreateImage",
)
def _assemble_image(ctx, layers, name):
@@ -278,35 +344,47 @@ def _get_runfile_path(ctx, f):
def _docker_build_impl(ctx):
"""Implementation for the docker_build rule."""
layer = _build_layer(ctx)
+ layer_sha = _sha256(ctx, layer)
+
+ config = _image_config(ctx, [layer_sha])
+ id = _sha256(ctx, config)
+
name = _compute_layer_name(ctx, layer)
metadata = _metadata(ctx, layer, name)
- _create_image(ctx, layer, name, metadata)
+
+ # creating a partial image so only pass the layers that belong to it
+ image_layer = {"layer": layer, "name": layer_sha}
+ _create_image(ctx, [image_layer], id, config, name, metadata)
+
# Compute the layers transitive provider.
# It includes the current layers, and, if they exists the layer from
# base docker_build rules. We do not extract the list of layer in
# a base tarball as they probably do not respect the convention on
# layer naming that our rules use.
layers = [
- {"layer": ctx.outputs.layer, "name": name}
+ {"layer": ctx.outputs.layer, "id": id, "name": name}
] + getattr(ctx.attr.base, "docker_layers", [])
# Generate the incremental load statement
ctx.template_action(
template = ctx.file.incremental_load_template,
substitutions = {
"%{load_statements}": "\n".join([
- "incr_load '%s' '%s'" % (_get_runfile_path(ctx, l["name"]),
- _get_runfile_path(ctx, l["layer"]))
+ "incr_load '%s' '%s' '%s'" % (_get_runfile_path(ctx, l["name"]),
+ _get_runfile_path(ctx, l["id"]),
+ _get_runfile_path(ctx, l["layer"]))
# The last layer is the first in the list of layers.
# We reverse to load the layer from the parent to the child.
for l in reverse(layers)]),
"%{repository}": _repository_name(ctx),
- "%{tag}" : ctx.label.name,
+ "%{tag}": ctx.label.name,
},
output = ctx.outputs.executable,
executable = True)
- _assemble_image(ctx, layers, name)
+ _assemble_image(ctx, reverse(layers), name)
runfiles = ctx.runfiles(
- files = [l["layer"] for l in layers] + [l["name"] for l in layers])
+ files = [l["name"] for l in layers] +
+ [l["id"] for l in layers] +
+ [l["layer"] for l in layers])
return struct(runfiles = runfiles,
files = set([ctx.outputs.layer]),
docker_layers = layers)
@@ -336,31 +414,36 @@ docker_build_ = rule(
allow_files=True),
"label_file_strings": attr.string_list(),
"build_layer": attr.label(
- default=Label("@bazel_tools//tools/build_defs/pkg:build_tar"),
+ default=Label("//tools/build_defs/pkg:build_tar"),
cfg=HOST_CFG,
executable=True,
allow_files=True),
"create_image": attr.label(
- default=Label("@bazel_tools//tools/build_defs/docker:create_image"),
+ default=Label("//tools/build_defs/docker:create_image"),
cfg=HOST_CFG,
executable=True,
allow_files=True),
"incremental_load_template": attr.label(
- default=Label("@bazel_tools//tools/build_defs/docker:incremental_load_template"),
+ default=Label("//tools/build_defs/docker:incremental_load_template"),
single_file=True,
allow_files=True),
"join_layers": attr.label(
- default=Label("@bazel_tools//tools/build_defs/docker:join_layers"),
+ default=Label("//tools/build_defs/docker:join_layers"),
cfg=HOST_CFG,
executable=True,
allow_files=True),
"rewrite_tool": attr.label(
- default=Label("@bazel_tools//tools/build_defs/docker:rewrite_json"),
+ default=Label("//tools/build_defs/docker:rewrite_json"),
+ cfg=HOST_CFG,
+ executable=True,
+ allow_files=True),
+ "create_image_config": attr.label(
+ default=Label("//tools/build_defs/docker:create_image_config"),
cfg=HOST_CFG,
executable=True,
allow_files=True),
"sha256": attr.label(
- default=Label("@bazel_tools//tools/build_defs/docker:sha256"),
+ default=Label("//tools/build_defs/docker:sha256"),
cfg=HOST_CFG,
executable=True,
allow_files=True)
@@ -375,6 +458,9 @@ docker_build_ = rule(
# is a single additional layer atop 'base'. The goal is to have relatively
# complete support for building docker image, from the Dockerfile spec.
#
+# For more information see the 'Config' section of the image specification:
+# https://github.com/opencontainers/image-spec/blob/v0.2.0/serialization.md
+#
# Only 'name' is required. All other fields have sane defaults.
#
# docker_build(
@@ -383,7 +469,7 @@ docker_build_ = rule(
#
# # The base layers on top of which to overlay this layer,
# # equivalent to FROM.
-# base="//another/build:rule",]
+# base="//another/build:rule",
#
# # The base directory of the files, defaulted to
# # the package of the input.
@@ -420,10 +506,6 @@ docker_build_ = rule(
# # https://docs.docker.com/reference/builder/#expose
# ports=[...],
#
-# # TODO(mattmoor): NYI
-# # https://docs.docker.com/reference/builder/#maintainer
-# maintainer="...",
-#
# # https://docs.docker.com/reference/builder/#user
# # NOTE: the normal directive affects subsequent RUN, CMD,
# # and ENTRYPOINT
@@ -445,9 +527,6 @@ docker_build_ = rule(
# ...
# "varN": "valN",
# },
-#
-# # NOTE: Without a motivating use case, there is little reason to support:
-# # https://docs.docker.com/reference/builder/#onbuild
# )
def docker_build(**kwargs):
"""Package a docker image.
@@ -459,7 +538,9 @@ def docker_build(**kwargs):
layer.tar
VERSION
json
+ {image-config-sha256}.json
...
+ manifest.json
repositories
top # an implementation detail of our rules, not consumed by Docker.
This rule appends a single new layer to the tarball of this form provided
diff --git a/tools/build_defs/docker/incremental_load.sh.tpl b/tools/build_defs/docker/incremental_load.sh.tpl
index 2db031d478..5ca164df05 100644
--- a/tools/build_defs/docker/incremental_load.sh.tpl
+++ b/tools/build_defs/docker/incremental_load.sh.tpl
@@ -14,12 +14,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# This is a generated files that loads all docker layer built by "docker_build".
+# This is a generated file that loads all docker layers built by "docker_build".
RUNFILES="${PYTHON_RUNFILES:-${BASH_SOURCE[0]}.runfiles}"
DOCKER="${DOCKER:-docker}"
+FULL_DOCKER_VERSION=$(docker version -f {{.Server.Version}} 2> /dev/null \
+ || echo "1.10.0")
+DOCKER_MAJOR_VERSION=$(echo "$FULL_DOCKER_VERSION" | sed -r 's#^([0-9]+)\..*#\1#')
+DOCKER_MINOR_VERSION=$(echo "$FULL_DOCKER_VERSION" | sed -r 's#^[0-9]+\.([0-9]+).*#\1#')
+if [ "$DOCKER_MAJOR_VERSION" -eq "1" ] && [ "$DOCKER_MINOR_VERSION" -lt "10" ]; then
+ LEGACY_DOCKER=true
+else
+ LEGACY_DOCKER=false
+fi
+
# List all images identifier (only the identifier) from the local
# docker registry.
IMAGES="$("${DOCKER}" images -aq)"
@@ -30,12 +40,17 @@ IMAGE_LEN=$(for i in $IMAGES; do echo -n $i | wc -c; done | sort -g | head -1 |
function incr_load() {
# Load a layer if and only if the layer is not in "$IMAGES", that is
# in the local docker registry.
- name=$(cat ${RUNFILES}/$1)
+ if [ "$LEGACY_DOCKER" = true ]; then
+ name=$(cat ${RUNFILES}/$1)
+ else
+ name=$(cat ${RUNFILES}/$2)
+ fi
+
if (echo "$IMAGES" | grep -q ^${name:0:$IMAGE_LEN}$); then
echo "Skipping $name, already loaded."
else
echo "Loading $name..."
- "${DOCKER}" load -i ${RUNFILES}/$2
+ "${DOCKER}" load -i ${RUNFILES}/$3
fi
}
@@ -47,5 +62,9 @@ function incr_load() {
if [ -n "${name}" ]; then
TAG="${1:-%{repository}:%{tag}}"
echo "Tagging ${name} as ${TAG}"
- "${DOCKER}" tag -f ${name} ${TAG}
+ if [ "$LEGACY_DOCKER" = true ]; then
+ "${DOCKER}" tag -f ${name} ${TAG}
+ else
+ "${DOCKER}" tag ${name} ${TAG}
+ fi
fi
diff --git a/tools/build_defs/docker/join_layers.py b/tools/build_defs/docker/join_layers.py
index d0273a6085..7cf8381a1f 100644
--- a/tools/build_defs/docker/join_layers.py
+++ b/tools/build_defs/docker/join_layers.py
@@ -19,9 +19,11 @@
# --name=myname --repository=repositoryName
# See the gflags declaration about the flags argument details.
+import json
import os.path
import sys
+from tools.build_defs.docker import utils
from tools.build_defs.pkg import archive
from third_party.py import gflags
@@ -39,15 +41,14 @@ gflags.DEFINE_string(
gflags.DEFINE_string(
'name', None,
- 'The symbolic name of this image (use with --id and --repsoitory).')
+ 'The symbolic name of this image (use with --id and --repository).')
FLAGS = gflags.FLAGS
def _layer_filter(name):
- """Ignore files 'top' and 'repositories' when merging layers."""
basename = os.path.basename(name)
- return basename not in ('top', 'repositories')
+ return basename not in ('manifest.json', 'top', 'repositories')
def create_image(output, layers, identifier=None,
@@ -61,9 +62,16 @@ def create_image(output, layers, identifier=None,
name: symbolic name for this docker image.
repository: repository name for this docker image.
"""
+ manifest = []
+
tar = archive.TarFileWriter(output)
for layer in layers:
tar.add_tar(layer, name_filter=_layer_filter)
+ manifest += utils.GetManifestFromTar(layer)
+
+ manifest_content = json.dumps(manifest, sort_keys=True)
+ tar.add_file('manifest.json', content=manifest_content)
+
# In addition to N layers of the form described above, there might be
# a single file at the top of the image called repositories.
# This file contains a JSON blob of the form:
diff --git a/tools/build_defs/docker/rewrite_json.py b/tools/build_defs/docker/rewrite_json.py
index 933a640d2e..38fd01def0 100644
--- a/tools/build_defs/docker/rewrite_json.py
+++ b/tools/build_defs/docker/rewrite_json.py
@@ -18,8 +18,8 @@ import json
import os
import os.path
import sys
-import tarfile
+from tools.build_defs.docker import utils
from third_party.py import gflags
gflags.DEFINE_string(
@@ -237,27 +237,6 @@ def RewriteMetadata(data, options):
return output
-def GetTarFile(f, name):
- """Return the content of a file inside a tar file.
-
- This method looks for ./f, /f and f file entry in a tar file and if found,
- return its content. This allows to read file with various path prefix.
-
- Args:
- f: The tar file to read.
- name: The name of the file inside the tar file.
-
- Returns:
- The content of the file, or None if not found.
- """
- with tarfile.open(f, 'r') as tar:
- members = [tarinfo.name for tarinfo in tar.getmembers()]
- for i in ['', './', '/']:
- if i + name in members:
- return tar.extractfile(i + name).read()
- return None
-
-
def GetParentIdentifier(f):
"""Try to look at the parent identifier from a docker image.
@@ -272,10 +251,10 @@ def GetParentIdentifier(f):
The identifier of the docker image, or None if no identifier was found.
"""
# TODO(dmarting): Maybe we could drop the 'top' file all together?
- top = GetTarFile(f, 'top')
+ top = utils.GetTarFile(f, 'top')
if top:
return top.strip()
- repositories = GetTarFile(f, 'repositories')
+ repositories = utils.GetTarFile(f, 'repositories')
if repositories:
data = json.loads(repositories)
for k1 in data:
@@ -291,7 +270,7 @@ def main(unused_argv):
if FLAGS.base:
parent = GetParentIdentifier(FLAGS.base)
if parent:
- base_json = GetTarFile(FLAGS.base, '%s/json' % parent)
+ base_json = utils.GetTarFile(FLAGS.base, '%s/json' % parent)
data = json.loads(base_json)
name = FLAGS.name
diff --git a/tools/build_defs/docker/testdata/BUILD b/tools/build_defs/docker/testdata/BUILD
index 5feaba10e5..c430ca27a7 100644
--- a/tools/build_defs/docker/testdata/BUILD
+++ b/tools/build_defs/docker/testdata/BUILD
@@ -222,6 +222,12 @@ docker_build(
]]
docker_build(
+ name = "with_user",
+ base = ":base_with_volume",
+ user = "nobody",
+)
+
+docker_build(
name = "link_with_files_base",
base = ":files_base",
symlinks = {
@@ -353,6 +359,12 @@ docker_build(
)
docker_build(
+ name = "notop_with_user",
+ base = ":notop_base_with_volume",
+ user = "nobody",
+)
+
+docker_build(
name = "notop_link_with_files_base",
base = ":notop_files_base",
symlinks = {
diff --git a/tools/build_defs/docker/testenv.sh b/tools/build_defs/docker/testenv.sh
index 977d1db42c..6840ad0951 100755
--- a/tools/build_defs/docker/testenv.sh
+++ b/tools/build_defs/docker/testenv.sh
@@ -19,7 +19,7 @@
[ -z "$TEST_SRCDIR" ] && { echo "TEST_SRCDIR not set!" >&2; exit 1; }
# Load the unit-testing framework
-source "${TEST_SRCDIR}/src/test/shell/unittest.bash" || \
+source "${TEST_SRCDIR}/io_bazel/src/test/shell/unittest.bash" || \
{ echo "Failed to source unittest.bash" >&2; exit 1; }
-readonly TEST_DATA_DIR="${TEST_SRCDIR}/tools/build_defs/docker/testdata"
+readonly TEST_DATA_DIR="${TEST_SRCDIR}/io_bazel/tools/build_defs/docker/testdata"
diff --git a/tools/build_defs/docker/utils.py b/tools/build_defs/docker/utils.py
new file mode 100644
index 0000000000..f6c53d01cc
--- /dev/null
+++ b/tools/build_defs/docker/utils.py
@@ -0,0 +1,84 @@
+# Copyright 2015 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""This package contains various functions used when building containers."""
+
+import json
+import tarfile
+
+
+def ExtractValue(value):
+ """Return the contents of a file point to by value if it starts with an @.
+
+ Args:
+ value: The possible filename to extract or a string.
+
+ Returns:
+ The content of the file if value starts with an @, or the passed value.
+ """
+ if value.startswith('@'):
+ with open(value[1:], 'r') as f:
+ value = f.read()
+ return value
+
+
+def GetTarFile(f, name):
+ """Returns the content of a file inside a tar file.
+
+  This method looks for the ./f, /f and f file entries in a tar file and, if
+  found, returns the entry's content, regardless of the path prefix used.
+
+ Args:
+ f: The tar file to read.
+ name: The name of the file inside the tar file.
+
+ Returns:
+ The content of the file, or None if not found.
+ """
+ with tarfile.open(f, 'r') as tar:
+ members = [tarinfo.name for tarinfo in tar.getmembers()]
+ for i in ['', './', '/']:
+ if i + name in members:
+ return tar.extractfile(i + name).read()
+ return None
+
+
+def GetManifestFromTar(f=None):
+ """Returns the manifest array from a tar file.
+
+ Args:
+ f: The tar file to read.
+
+ Returns:
+ The content of the manifest file or an empty array if not found.
+ """
+ if f:
+ raw_manifest_data = GetTarFile(f, 'manifest.json')
+ if raw_manifest_data:
+ return json.loads(raw_manifest_data)
+ return []
+
+
+def GetLatestManifestFromTar(f=None):
+ """Returns the latest manifest entry from a tar file.
+
+ The latest manifest entry is the one at the bottom.
+
+ Args:
+ f: The tar file to read.
+
+ Returns:
+ The latest manifest entry object, or None if not found.
+ """
+ manifest_data = GetManifestFromTar(f)
+ return manifest_data[-1] if manifest_data else None