streamLayeredImage: add dynamic tagging of docker image (#329425)

* streamLayeredImage: self-document the script

'podman load' doesn't let me override the name/tag of the image.
The name and tag are dynamic in a CI environment, so I would like to
be able to adjust the tag dynamically.
Since the image is streamed by stream_layered_image.py, there is no need
to stick with the Nix-hardcoded image name/tag: the Python script can
accept another name.

I've added argparse to expose the feature. It has the added benefit of
adding `--help` support to the script, which makes its usage
self-explanatory.

* Reformatted the file with `black`

Changed files
+67 -35
pkgs
build-support
+2
pkgs/build-support/docker/default.nix
···
preferLocalBuild = true;
passthru = passthru // {
inherit (conf) imageTag;
+
inherit conf;
+
inherit streamScript;
# Distinguish tarballs and exes at the Nix level so functions that
# take images can know in advance how the image is supposed to be used.
+65 -35
pkgs/build-support/docker/stream_layered_image.py
···
[2]: https://github.com/moby/moby/blob/4fb59c20a4fb54f944fe170d0ff1d00eb4a24d6f/image/spec/v1.2.md#image-json-field-descriptions
""" # noqa: E501
-
+
import argparse
import io
import os
import re
···
"""
invalid_paths = [i for i in paths if not i.startswith(store_dir)]
-
assert len(invalid_paths) == 0, \
-
f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"
+
assert (
+
len(invalid_paths) == 0
+
), f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"
# First, calculate the tarball checksum and the size.
extract_checksum = ExtractChecksum()
-
archive_paths_to(
-
extract_checksum,
-
paths,
-
mtime, uid, gid, uname, gname
-
)
+
archive_paths_to(extract_checksum, paths, mtime, uid, gid, uname, gname)
(checksum, size) = extract_checksum.extract()
path = f"{checksum}/layer.tar"
···
# Then actually stream the contents to the outer tarball.
read_fd, write_fd = os.pipe()
with open(read_fd, "rb") as read, open(write_fd, "wb") as write:
+
def producer():
-
archive_paths_to(
-
write,
-
paths,
-
mtime, uid, gid, uname, gname
-
)
+
archive_paths_to(write, paths, mtime, uid, gid, uname, gname)
write.close()
# Closing the write end of the fifo also closes the read end,
···
target_tar.addfile(tarinfo, f)
return LayerInfo(
-
size=None,
-
checksum=checksum,
-
path=path,
-
paths=[customisation_layer]
+
size=None, checksum=checksum, path=path, paths=[customisation_layer]
)
···
def main():
-
with open(sys.argv[1], "r") as f:
+
arg_parser = argparse.ArgumentParser(
+
description="""
+
This script generates a Docker image from a set of store paths. Uses
+
Docker Image Specification v1.2 as reference [1].
+
+
[1]: https://github.com/moby/moby/blob/master/image/spec/v1.2.md
+
"""
+
)
+
arg_parser.add_argument(
+
"conf",
+
type=str,
+
help="""
+
JSON file with the following properties and writes the
+
image as an uncompressed tarball to stdout:
+
+
* "architecture", "config", "os", "created", "repo_tag" correspond to
+
the fields with the same name on the image spec [2].
+
* "created" can be "now".
+
* "created" is also used as mtime for files added to the image.
+
* "uid", "gid", "uname", "gname" is the file ownership, for example,
+
0, 0, "root", "root".
+
* "store_layers" is a list of layers in ascending order, where each
+
layer is the list of store paths to include in that layer.
+
""",
+
)
+
arg_parser.add_argument(
+
"--repo_tag", "-t", type=str,
+
help="Override the RepoTags from the configuration"
+
)
+
+
args = arg_parser.parse_args()
+
with open(args.conf, "r") as f:
conf = json.load(f)
created = (
-
datetime.now(tz=timezone.utc)
-
if conf["created"] == "now"
-
else datetime.fromisoformat(conf["created"])
+
datetime.now(tz=timezone.utc)
+
if conf["created"] == "now"
+
else datetime.fromisoformat(conf["created"])
)
mtime = int(created.timestamp())
uid = int(conf["uid"])
···
start = len(layers) + 1
for num, store_layer in enumerate(conf["store_layers"], start=start):
-
print("Creating layer", num, "from paths:", store_layer,
-
file=sys.stderr)
-
info = add_layer_dir(tar, store_layer, store_dir,
-
mtime, uid, gid, uname, gname)
+
print(
+
"Creating layer",
+
num,
+
"from paths:",
+
store_layer,
+
file=sys.stderr,
+
)
+
info = add_layer_dir(
+
tar, store_layer, store_dir, mtime, uid, gid, uname, gname
+
)
layers.append(info)
-
print("Creating layer", len(layers) + 1, "with customisation...",
-
file=sys.stderr)
+
print(
+
"Creating layer",
+
len(layers) + 1,
+
"with customisation...",
+
file=sys.stderr,
+
)
layers.append(
-
add_customisation_layer(
-
tar,
-
conf["customisation_layer"],
-
mtime=mtime
-
)
+
add_customisation_layer(
+
tar, conf["customisation_layer"], mtime=mtime
+
)
)
print("Adding manifests...", file=sys.stderr)
···
},
"history": [
{
-
"created": datetime.isoformat(created),
-
"comment": f"store paths: {layer.paths}"
+
"created": datetime.isoformat(created),
+
"comment": f"store paths: {layer.paths}",
}
for layer in layers
],
···
manifest_json = [
{
"Config": image_json_path,
-
"RepoTags": [conf["repo_tag"]],
+
"RepoTags": [args.repo_tag or conf["repo_tag"]],
"Layers": [layer.path for layer in layers],
}
]