From dcaef100c1f10d12fbd674cfc8b415c3576f28c1 Mon Sep 17 00:00:00 2001 From: Adam Simpson Date: Thu, 11 Jul 2019 16:08:39 -0700 Subject: [PATCH 1/7] Move from image-tools to umoci for OCI unpacking --- CONTRIBUTORS.md | 1 + go.mod | 24 +- go.sum | 58 +- .../pkg/build/sources/conveyorPacker_oci.go | 48 +- vendor/github.com/apex/log/LICENSE | 22 + vendor/github.com/apex/log/Makefile | 2 + vendor/github.com/apex/log/Readme.md | 29 + vendor/github.com/apex/log/default.go | 45 + vendor/github.com/apex/log/doc.go | 10 + vendor/github.com/apex/log/entry.go | 172 ++ vendor/github.com/apex/log/interface.go | 19 + vendor/github.com/apex/log/levels.go | 81 + vendor/github.com/apex/log/logger.go | 149 ++ vendor/github.com/apex/log/pkg.go | 100 ++ vendor/github.com/apex/log/stack.go | 8 + .../cyphar/filepath-securejoin/.travis.yml | 19 + .../cyphar/filepath-securejoin/LICENSE | 28 + .../cyphar/filepath-securejoin/README.md | 65 + .../cyphar/filepath-securejoin/VERSION | 1 + .../cyphar/filepath-securejoin/join.go | 134 ++ .../cyphar/filepath-securejoin/vendor.conf | 1 + .../cyphar/filepath-securejoin/vfs.go | 41 + vendor/github.com/hashicorp/errwrap/README.md | 2 +- .../hashicorp/go-multierror/format.go | 6 +- .../hashicorp/go-multierror/sort.go | 16 + vendor/github.com/klauspost/compress/LICENSE | 27 + .../klauspost/compress/flate/copy.go | 32 + .../klauspost/compress/flate/crc32_amd64.go | 42 + .../klauspost/compress/flate/crc32_amd64.s | 214 +++ .../klauspost/compress/flate/crc32_noasm.go | 35 + .../klauspost/compress/flate/deflate.go | 1353 ++++++++++++++++ .../klauspost/compress/flate/dict_decoder.go | 184 +++ .../klauspost/compress/flate/gen.go | 265 ++++ .../compress/flate/huffman_bit_writer.go | 701 +++++++++ .../klauspost/compress/flate/huffman_code.go | 344 ++++ .../klauspost/compress/flate/inflate.go | 880 +++++++++++ .../klauspost/compress/flate/reverse_bits.go | 48 + .../klauspost/compress/flate/snappy.go | 900 +++++++++++ .../klauspost/compress/flate/token.go | 115 
++ vendor/github.com/klauspost/cpuid/.gitignore | 24 + vendor/github.com/klauspost/cpuid/.travis.yml | 23 + .../klauspost/cpuid/CONTRIBUTING.txt | 35 + vendor/github.com/klauspost/cpuid/LICENSE | 22 + vendor/github.com/klauspost/cpuid/README.md | 145 ++ vendor/github.com/klauspost/cpuid/cpuid.go | 1040 +++++++++++++ vendor/github.com/klauspost/cpuid/cpuid_386.s | 42 + .../github.com/klauspost/cpuid/cpuid_amd64.s | 42 + .../klauspost/cpuid/detect_intel.go | 17 + .../github.com/klauspost/cpuid/detect_ref.go | 23 + vendor/github.com/klauspost/cpuid/generate.go | 4 + .../github.com/klauspost/cpuid/private-gen.go | 476 ++++++ vendor/github.com/klauspost/crc32/.gitignore | 24 + vendor/github.com/klauspost/crc32/.travis.yml | 13 + vendor/github.com/klauspost/crc32/LICENSE | 28 + vendor/github.com/klauspost/crc32/README.md | 87 ++ vendor/github.com/klauspost/crc32/crc32.go | 207 +++ .../github.com/klauspost/crc32/crc32_amd64.go | 230 +++ .../github.com/klauspost/crc32/crc32_amd64.s | 319 ++++ .../klauspost/crc32/crc32_amd64p32.go | 43 + .../klauspost/crc32/crc32_amd64p32.s | 67 + .../klauspost/crc32/crc32_generic.go | 89 ++ .../klauspost/crc32/crc32_otherarch.go | 15 + .../github.com/klauspost/crc32/crc32_s390x.go | 91 ++ .../github.com/klauspost/crc32/crc32_s390x.s | 249 +++ vendor/github.com/klauspost/pgzip/.gitignore | 24 + vendor/github.com/klauspost/pgzip/.travis.yml | 23 + vendor/github.com/klauspost/pgzip/GO_LICENSE | 27 + vendor/github.com/klauspost/pgzip/LICENSE | 22 + vendor/github.com/klauspost/pgzip/README.md | 136 ++ vendor/github.com/klauspost/pgzip/circle.yml | 7 + vendor/github.com/klauspost/pgzip/gunzip.go | 573 +++++++ vendor/github.com/klauspost/pgzip/gzip.go | 501 ++++++ vendor/github.com/openSUSE/umoci/.gitignore | 4 + vendor/github.com/openSUSE/umoci/.gitmodules | 3 + vendor/github.com/openSUSE/umoci/.lgtm | 2 + vendor/github.com/openSUSE/umoci/.travis.yml | 36 + vendor/github.com/openSUSE/umoci/CHANGELOG.md | 404 +++++ 
.../openSUSE/umoci/CODE_OF_CONDUCT.md | 30 + .../github.com/openSUSE/umoci/CONTRIBUTING.md | 145 ++ .../openSUSE/umoci/COPYING} | 6 +- vendor/github.com/openSUSE/umoci/Dockerfile | 52 + .../github.com/openSUSE/umoci/GOVERNANCE.md | 18 + vendor/github.com/openSUSE/umoci/MAINTAINERS | 1 + vendor/github.com/openSUSE/umoci/Makefile | 192 +++ vendor/github.com/openSUSE/umoci/README.md | 150 ++ vendor/github.com/openSUSE/umoci/VERSION | 1 + vendor/github.com/openSUSE/umoci/api.go | 47 + vendor/github.com/openSUSE/umoci/go.mod | 35 + vendor/github.com/openSUSE/umoci/go.sum | 62 + .../openSUSE/umoci/oci/cas/README.md | 10 + .../github.com/openSUSE/umoci/oci/cas/cas.go | 109 ++ .../openSUSE/umoci/oci/cas/dir/dir.go | 436 ++++++ .../openSUSE/umoci/oci/casext/blob.go | 152 ++ .../openSUSE/umoci/oci/casext/casext.go | 38 + .../openSUSE/umoci/oci/casext/gc.go | 111 ++ .../openSUSE/umoci/oci/casext/json.go | 45 + .../openSUSE/umoci/oci/casext/map.go | 127 ++ .../openSUSE/umoci/oci/casext/refname.go | 229 +++ .../umoci/oci/casext/verified_blob.go | 40 + .../openSUSE/umoci/oci/casext/walk.go | 194 +++ .../umoci/oci/config/convert/README.md | 11 + .../umoci/oci/config/convert/runtime.go | 176 +++ .../umoci/oci/config/generate/README.md | 11 + .../umoci/oci/config/generate/save.go | 55 + .../umoci/oci/config/generate/spec.go | 383 +++++ .../openSUSE/umoci/oci/layer/README.md | 21 + .../openSUSE/umoci/oci/layer/generate.go | 144 ++ .../openSUSE/umoci/oci/layer/tar_extract.go | 631 ++++++++ .../openSUSE/umoci/oci/layer/tar_generate.go | 305 ++++ .../openSUSE/umoci/oci/layer/tar_unix.go | 33 + .../openSUSE/umoci/oci/layer/unpack.go | 456 ++++++ .../openSUSE/umoci/oci/layer/utils.go | 234 +++ .../openSUSE/umoci/pkg/fseval/fseval.go | 102 ++ .../umoci/pkg/fseval/fseval_default.go | 154 ++ .../umoci/pkg/fseval/fseval_rootless.go | 156 ++ .../umoci/pkg/hardening/verified_reader.go | 176 +++ .../openSUSE/umoci/pkg/idtools/idtools.go | 98 ++ .../openSUSE/umoci/pkg/system/mknod_linux.go | 43 
+ .../openSUSE/umoci/pkg/system/utime_linux.go | 41 + .../openSUSE/umoci/pkg/system/xattr_linux.go | 120 ++ .../openSUSE/umoci/pkg/testutils/testutils.go | 40 + .../openSUSE/umoci/pkg/unpriv/unpriv.go | 588 +++++++ .../umoci/third_party/shared/COPYING} | 5 +- .../openSUSE/umoci/third_party/shared/util.go | 55 + .../openSUSE/umoci/third_party/user/LICENSE | 191 +++ .../openSUSE/umoci/third_party/user/NOTICE | 17 + .../openSUSE/umoci/third_party/user/README.md | 7 + .../openSUSE/umoci/third_party/user/lookup.go | 128 ++ .../umoci/third_party/user/lookup_unix.go | 48 + .../third_party/user/lookup_unsupported.go | 39 + .../openSUSE/umoci/third_party/user/user.go | 460 ++++++ vendor/github.com/openSUSE/umoci/utils.go | 325 ++++ .../image-spec/schema/config-schema.json | 140 -- .../image-spec/schema/content-descriptor.json | 33 - .../image-spec/schema/defs-descriptor.json | 27 - .../image-spec/schema/defs.json | 91 -- .../opencontainers/image-spec/schema/doc.go | 16 - .../opencontainers/image-spec/schema/error.go | 44 - .../opencontainers/image-spec/schema/fs.go | 323 ---- .../opencontainers/image-spec/schema/gen.go | 21 - .../image-spec/schema/image-index-schema.json | 89 -- .../schema/image-layout-schema.json | 18 - .../schema/image-manifest-schema.json | 34 - .../image-spec/schema/loader.go | 126 -- .../image-spec/schema/schema.go | 75 - .../image-spec/schema/validator.go | 224 --- .../image-tools/image/autodetect.go | 69 - .../image-tools/image/config.go | 120 -- .../image-tools/image/descriptor.go | 144 -- .../opencontainers/image-tools/image/doc.go | 16 - .../opencontainers/image-tools/image/image.go | 389 ----- .../opencontainers/image-tools/image/index.go | 71 - .../image-tools/image/layout.go | 104 -- .../image-tools/image/manifest.go | 342 ---- .../image-tools/image/project.go | 21 - .../image-tools/image/walker.go | 333 ---- .../runtime-tools/generate/config.go | 3 +- .../runtime-tools/generate/generate.go | 14 +- .../rootless-containers/proto/COPYING | 202 
+++ .../proto/go-proto/rootlesscontainers.pb.go | 80 + .../go-proto/rootlesscontainers_generate.go | 37 + vendor/github.com/sirupsen/logrus/.travis.yml | 6 +- vendor/github.com/sirupsen/logrus/README.md | 60 +- vendor/github.com/sirupsen/logrus/entry.go | 26 +- vendor/github.com/sirupsen/logrus/exported.go | 20 +- .../github.com/sirupsen/logrus/formatter.go | 20 +- .../sirupsen/logrus/json_formatter.go | 12 +- vendor/github.com/sirupsen/logrus/logger.go | 16 +- .../sirupsen/logrus/text_formatter.go | 31 +- vendor/github.com/urfave/cli/.flake8 | 2 + vendor/github.com/urfave/cli/.gitignore | 2 + vendor/github.com/urfave/cli/.travis.yml | 27 + vendor/github.com/urfave/cli/CHANGELOG.md | 435 ++++++ vendor/github.com/urfave/cli/LICENSE | 21 + vendor/github.com/urfave/cli/README.md | 1381 +++++++++++++++++ vendor/github.com/urfave/cli/app.go | 497 ++++++ vendor/github.com/urfave/cli/appveyor.yml | 26 + vendor/github.com/urfave/cli/category.go | 44 + vendor/github.com/urfave/cli/cli.go | 22 + vendor/github.com/urfave/cli/command.go | 304 ++++ vendor/github.com/urfave/cli/context.go | 278 ++++ vendor/github.com/urfave/cli/errors.go | 115 ++ vendor/github.com/urfave/cli/flag-types.json | 93 ++ vendor/github.com/urfave/cli/flag.go | 799 ++++++++++ .../github.com/urfave/cli/flag_generated.go | 627 ++++++++ vendor/github.com/urfave/cli/funcs.go | 28 + .../github.com/urfave/cli/generate-flag-types | 255 +++ vendor/github.com/urfave/cli/help.go | 338 ++++ vendor/github.com/urfave/cli/runtests | 122 ++ vendor/github.com/vbatts/go-mtree/.gitignore | 6 + vendor/github.com/vbatts/go-mtree/.travis.yml | 20 + vendor/github.com/vbatts/go-mtree/LICENSE | 28 + vendor/github.com/vbatts/go-mtree/Makefile | 85 + vendor/github.com/vbatts/go-mtree/README.md | 213 +++ vendor/github.com/vbatts/go-mtree/check.go | 30 + vendor/github.com/vbatts/go-mtree/cksum.go | 49 + vendor/github.com/vbatts/go-mtree/compare.go | 448 ++++++ vendor/github.com/vbatts/go-mtree/creator.go | 10 + 
vendor/github.com/vbatts/go-mtree/entry.go | 152 ++ vendor/github.com/vbatts/go-mtree/fseval.go | 54 + vendor/github.com/vbatts/go-mtree/glide.lock | 21 + vendor/github.com/vbatts/go-mtree/glide.yaml | 16 + .../github.com/vbatts/go-mtree/hierarchy.go | 48 + .../github.com/vbatts/go-mtree/keywordfunc.go | 172 ++ .../vbatts/go-mtree/keywordfuncs_bsd.go | 69 + .../vbatts/go-mtree/keywordfuncs_linux.go | 107 ++ .../go-mtree/keywordfuncs_unsupported.go | 47 + vendor/github.com/vbatts/go-mtree/keywords.go | 327 ++++ .../vbatts/go-mtree/lchtimes_unix.go | 22 + .../vbatts/go-mtree/lchtimes_unsupported.go | 11 + .../github.com/vbatts/go-mtree/lookup_new.go | 9 + .../github.com/vbatts/go-mtree/lookup_old.go | 102 ++ vendor/github.com/vbatts/go-mtree/parse.go | 105 ++ .../vbatts/go-mtree/pkg/govis/COPYING | 202 +++ .../vbatts/go-mtree/pkg/govis/README.md | 27 + .../vbatts/go-mtree/pkg/govis/govis.go | 39 + .../vbatts/go-mtree/pkg/govis/unvis.go | 294 ++++ .../vbatts/go-mtree/pkg/govis/vis.go | 177 +++ vendor/github.com/vbatts/go-mtree/releases.md | 11 + .../github.com/vbatts/go-mtree/stat_unix.go | 18 + .../vbatts/go-mtree/stat_windows.go | 12 + vendor/github.com/vbatts/go-mtree/tar.go | 461 ++++++ vendor/github.com/vbatts/go-mtree/update.go | 154 ++ .../github.com/vbatts/go-mtree/updatefuncs.go | 201 +++ .../vbatts/go-mtree/updatefuncs_linux.go | 21 + .../go-mtree/updatefuncs_unsupported.go | 11 + vendor/github.com/vbatts/go-mtree/version.go | 23 + vendor/github.com/vbatts/go-mtree/walk.go | 385 +++++ .../github.com/vbatts/go-mtree/xattr/xattr.go | 42 + .../go-mtree/xattr/xattr_unsupported.go | 21 + .../xeipuuv/gojsonschema/validation.go | 4 +- vendor/go4.org/AUTHORS | 8 - vendor/go4.org/errorutil/highlight.go | 58 - .../x/crypto/ripemd160/ripemd160.go | 120 ++ .../x/crypto/ripemd160/ripemd160block.go | 165 ++ vendor/modules.txt | 58 +- 236 files changed, 29203 insertions(+), 3094 deletions(-) create mode 100644 vendor/github.com/apex/log/LICENSE create mode 100644 
vendor/github.com/apex/log/Makefile create mode 100644 vendor/github.com/apex/log/Readme.md create mode 100644 vendor/github.com/apex/log/default.go create mode 100644 vendor/github.com/apex/log/doc.go create mode 100644 vendor/github.com/apex/log/entry.go create mode 100644 vendor/github.com/apex/log/interface.go create mode 100644 vendor/github.com/apex/log/levels.go create mode 100644 vendor/github.com/apex/log/logger.go create mode 100644 vendor/github.com/apex/log/pkg.go create mode 100644 vendor/github.com/apex/log/stack.go create mode 100644 vendor/github.com/cyphar/filepath-securejoin/.travis.yml create mode 100644 vendor/github.com/cyphar/filepath-securejoin/LICENSE create mode 100644 vendor/github.com/cyphar/filepath-securejoin/README.md create mode 100644 vendor/github.com/cyphar/filepath-securejoin/VERSION create mode 100644 vendor/github.com/cyphar/filepath-securejoin/join.go create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vendor.conf create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vfs.go create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/flate/copy.go create mode 100644 vendor/github.com/klauspost/compress/flate/crc32_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/crc32_amd64.s create mode 100644 vendor/github.com/klauspost/compress/flate/crc32_noasm.go create mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go create mode 100644 vendor/github.com/klauspost/compress/flate/gen.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go create mode 100644 
vendor/github.com/klauspost/compress/flate/reverse_bits.go create mode 100644 vendor/github.com/klauspost/compress/flate/snappy.go create mode 100644 vendor/github.com/klauspost/compress/flate/token.go create mode 100644 vendor/github.com/klauspost/cpuid/.gitignore create mode 100644 vendor/github.com/klauspost/cpuid/.travis.yml create mode 100644 vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt create mode 100644 vendor/github.com/klauspost/cpuid/LICENSE create mode 100644 vendor/github.com/klauspost/cpuid/README.md create mode 100644 vendor/github.com/klauspost/cpuid/cpuid.go create mode 100644 vendor/github.com/klauspost/cpuid/cpuid_386.s create mode 100644 vendor/github.com/klauspost/cpuid/cpuid_amd64.s create mode 100644 vendor/github.com/klauspost/cpuid/detect_intel.go create mode 100644 vendor/github.com/klauspost/cpuid/detect_ref.go create mode 100644 vendor/github.com/klauspost/cpuid/generate.go create mode 100644 vendor/github.com/klauspost/cpuid/private-gen.go create mode 100644 vendor/github.com/klauspost/crc32/.gitignore create mode 100644 vendor/github.com/klauspost/crc32/.travis.yml create mode 100644 vendor/github.com/klauspost/crc32/LICENSE create mode 100644 vendor/github.com/klauspost/crc32/README.md create mode 100644 vendor/github.com/klauspost/crc32/crc32.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64.s create mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64p32.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64p32.s create mode 100644 vendor/github.com/klauspost/crc32/crc32_generic.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_otherarch.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_s390x.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_s390x.s create mode 100644 vendor/github.com/klauspost/pgzip/.gitignore create mode 100644 vendor/github.com/klauspost/pgzip/.travis.yml create mode 
100644 vendor/github.com/klauspost/pgzip/GO_LICENSE create mode 100644 vendor/github.com/klauspost/pgzip/LICENSE create mode 100644 vendor/github.com/klauspost/pgzip/README.md create mode 100644 vendor/github.com/klauspost/pgzip/circle.yml create mode 100644 vendor/github.com/klauspost/pgzip/gunzip.go create mode 100644 vendor/github.com/klauspost/pgzip/gzip.go create mode 100644 vendor/github.com/openSUSE/umoci/.gitignore create mode 100644 vendor/github.com/openSUSE/umoci/.gitmodules create mode 100644 vendor/github.com/openSUSE/umoci/.lgtm create mode 100644 vendor/github.com/openSUSE/umoci/.travis.yml create mode 100644 vendor/github.com/openSUSE/umoci/CHANGELOG.md create mode 100644 vendor/github.com/openSUSE/umoci/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/openSUSE/umoci/CONTRIBUTING.md rename vendor/{go4.org/LICENSE => github.com/openSUSE/umoci/COPYING} (99%) create mode 100644 vendor/github.com/openSUSE/umoci/Dockerfile create mode 100644 vendor/github.com/openSUSE/umoci/GOVERNANCE.md create mode 100644 vendor/github.com/openSUSE/umoci/MAINTAINERS create mode 100644 vendor/github.com/openSUSE/umoci/Makefile create mode 100644 vendor/github.com/openSUSE/umoci/README.md create mode 100644 vendor/github.com/openSUSE/umoci/VERSION create mode 100644 vendor/github.com/openSUSE/umoci/api.go create mode 100644 vendor/github.com/openSUSE/umoci/go.mod create mode 100644 vendor/github.com/openSUSE/umoci/go.sum create mode 100644 vendor/github.com/openSUSE/umoci/oci/cas/README.md create mode 100644 vendor/github.com/openSUSE/umoci/oci/cas/cas.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/cas/dir/dir.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/casext/blob.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/casext/casext.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/casext/gc.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/casext/json.go create mode 100644 
vendor/github.com/openSUSE/umoci/oci/casext/map.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/casext/refname.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/casext/verified_blob.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/casext/walk.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/config/convert/README.md create mode 100644 vendor/github.com/openSUSE/umoci/oci/config/convert/runtime.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/config/generate/README.md create mode 100644 vendor/github.com/openSUSE/umoci/oci/config/generate/save.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/config/generate/spec.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/layer/README.md create mode 100644 vendor/github.com/openSUSE/umoci/oci/layer/generate.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/layer/tar_extract.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/layer/tar_generate.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/layer/tar_unix.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/layer/unpack.go create mode 100644 vendor/github.com/openSUSE/umoci/oci/layer/utils.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/fseval/fseval.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/fseval/fseval_default.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/fseval/fseval_rootless.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/hardening/verified_reader.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/idtools/idtools.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/system/mknod_linux.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/system/utime_linux.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/system/xattr_linux.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/testutils/testutils.go create mode 100644 vendor/github.com/openSUSE/umoci/pkg/unpriv/unpriv.go rename 
vendor/github.com/{opencontainers/image-tools/LICENSE => openSUSE/umoci/third_party/shared/COPYING} (99%) create mode 100644 vendor/github.com/openSUSE/umoci/third_party/shared/util.go create mode 100644 vendor/github.com/openSUSE/umoci/third_party/user/LICENSE create mode 100644 vendor/github.com/openSUSE/umoci/third_party/user/NOTICE create mode 100644 vendor/github.com/openSUSE/umoci/third_party/user/README.md create mode 100644 vendor/github.com/openSUSE/umoci/third_party/user/lookup.go create mode 100644 vendor/github.com/openSUSE/umoci/third_party/user/lookup_unix.go create mode 100644 vendor/github.com/openSUSE/umoci/third_party/user/lookup_unsupported.go create mode 100644 vendor/github.com/openSUSE/umoci/third_party/user/user.go create mode 100644 vendor/github.com/openSUSE/umoci/utils.go delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/config-schema.json delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/content-descriptor.json delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/defs-descriptor.json delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/defs.json delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/doc.go delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/error.go delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/fs.go delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/gen.go delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/image-index-schema.json delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/image-layout-schema.json delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/image-manifest-schema.json delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/loader.go delete mode 100644 vendor/github.com/opencontainers/image-spec/schema/schema.go delete mode 100644 
vendor/github.com/opencontainers/image-spec/schema/validator.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/autodetect.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/config.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/descriptor.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/doc.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/image.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/index.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/layout.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/manifest.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/project.go delete mode 100644 vendor/github.com/opencontainers/image-tools/image/walker.go create mode 100644 vendor/github.com/rootless-containers/proto/COPYING create mode 100644 vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.pb.go create mode 100644 vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers_generate.go create mode 100644 vendor/github.com/urfave/cli/.flake8 create mode 100644 vendor/github.com/urfave/cli/.gitignore create mode 100644 vendor/github.com/urfave/cli/.travis.yml create mode 100644 vendor/github.com/urfave/cli/CHANGELOG.md create mode 100644 vendor/github.com/urfave/cli/LICENSE create mode 100644 vendor/github.com/urfave/cli/README.md create mode 100644 vendor/github.com/urfave/cli/app.go create mode 100644 vendor/github.com/urfave/cli/appveyor.yml create mode 100644 vendor/github.com/urfave/cli/category.go create mode 100644 vendor/github.com/urfave/cli/cli.go create mode 100644 vendor/github.com/urfave/cli/command.go create mode 100644 vendor/github.com/urfave/cli/context.go create mode 100644 vendor/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/urfave/cli/flag-types.json create mode 100644 
vendor/github.com/urfave/cli/flag.go create mode 100644 vendor/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/urfave/cli/funcs.go create mode 100644 vendor/github.com/urfave/cli/generate-flag-types create mode 100644 vendor/github.com/urfave/cli/help.go create mode 100644 vendor/github.com/urfave/cli/runtests create mode 100644 vendor/github.com/vbatts/go-mtree/.gitignore create mode 100644 vendor/github.com/vbatts/go-mtree/.travis.yml create mode 100644 vendor/github.com/vbatts/go-mtree/LICENSE create mode 100644 vendor/github.com/vbatts/go-mtree/Makefile create mode 100644 vendor/github.com/vbatts/go-mtree/README.md create mode 100644 vendor/github.com/vbatts/go-mtree/check.go create mode 100644 vendor/github.com/vbatts/go-mtree/cksum.go create mode 100644 vendor/github.com/vbatts/go-mtree/compare.go create mode 100644 vendor/github.com/vbatts/go-mtree/creator.go create mode 100644 vendor/github.com/vbatts/go-mtree/entry.go create mode 100644 vendor/github.com/vbatts/go-mtree/fseval.go create mode 100644 vendor/github.com/vbatts/go-mtree/glide.lock create mode 100644 vendor/github.com/vbatts/go-mtree/glide.yaml create mode 100644 vendor/github.com/vbatts/go-mtree/hierarchy.go create mode 100644 vendor/github.com/vbatts/go-mtree/keywordfunc.go create mode 100644 vendor/github.com/vbatts/go-mtree/keywordfuncs_bsd.go create mode 100644 vendor/github.com/vbatts/go-mtree/keywordfuncs_linux.go create mode 100644 vendor/github.com/vbatts/go-mtree/keywordfuncs_unsupported.go create mode 100644 vendor/github.com/vbatts/go-mtree/keywords.go create mode 100644 vendor/github.com/vbatts/go-mtree/lchtimes_unix.go create mode 100644 vendor/github.com/vbatts/go-mtree/lchtimes_unsupported.go create mode 100644 vendor/github.com/vbatts/go-mtree/lookup_new.go create mode 100644 vendor/github.com/vbatts/go-mtree/lookup_old.go create mode 100644 vendor/github.com/vbatts/go-mtree/parse.go create mode 100644 
vendor/github.com/vbatts/go-mtree/pkg/govis/COPYING create mode 100644 vendor/github.com/vbatts/go-mtree/pkg/govis/README.md create mode 100644 vendor/github.com/vbatts/go-mtree/pkg/govis/govis.go create mode 100644 vendor/github.com/vbatts/go-mtree/pkg/govis/unvis.go create mode 100644 vendor/github.com/vbatts/go-mtree/pkg/govis/vis.go create mode 100644 vendor/github.com/vbatts/go-mtree/releases.md create mode 100644 vendor/github.com/vbatts/go-mtree/stat_unix.go create mode 100644 vendor/github.com/vbatts/go-mtree/stat_windows.go create mode 100644 vendor/github.com/vbatts/go-mtree/tar.go create mode 100644 vendor/github.com/vbatts/go-mtree/update.go create mode 100644 vendor/github.com/vbatts/go-mtree/updatefuncs.go create mode 100644 vendor/github.com/vbatts/go-mtree/updatefuncs_linux.go create mode 100644 vendor/github.com/vbatts/go-mtree/updatefuncs_unsupported.go create mode 100644 vendor/github.com/vbatts/go-mtree/version.go create mode 100644 vendor/github.com/vbatts/go-mtree/walk.go create mode 100644 vendor/github.com/vbatts/go-mtree/xattr/xattr.go create mode 100644 vendor/github.com/vbatts/go-mtree/xattr/xattr_unsupported.go delete mode 100644 vendor/go4.org/AUTHORS delete mode 100644 vendor/go4.org/errorutil/highlight.go create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160.go create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160block.go diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index ea61e6fffc..8cc9d34bb9 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -12,6 +12,7 @@ # Contributors: - Adam Hughes , + - Adam Simpson , - Afif Elghraoui - Amanda Duffy - Ángel Bejarano diff --git a/go.mod b/go.mod index e9f32156d3..577f83c9a4 100644 --- a/go.mod +++ b/go.mod @@ -34,9 +34,7 @@ require ( github.com/docker/docker-credential-helpers v0.6.0 // indirect github.com/docker/go-connections v0.3.0 // indirect github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 // indirect - github.com/docker/go-units v0.3.3 // 
indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/dsnet/compress v0.0.1 // indirect github.com/fatih/color v1.7.0 github.com/garyburd/redigo v1.6.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect @@ -47,15 +45,15 @@ require ( github.com/gorilla/mux v1.6.2 // indirect github.com/gorilla/websocket v1.4.0 github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect - github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect github.com/hashicorp/go-cleanhttp v0.5.1 // indirect - github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874 // indirect github.com/imdario/mergo v0.3.7 // indirect github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56 // indirect github.com/juju/errors v0.0.0-20190207033735-e65537c515d7 // indirect github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect github.com/juju/testing v0.0.0-20190613124551-e81189438503 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/klauspost/compress v1.4.1 // indirect + github.com/klauspost/cpuid v1.2.0 // indirect github.com/kr/pty v1.1.3 github.com/kubernetes-sigs/cri-o v0.0.0-20180917213123-8afc34092907 github.com/mattn/go-colorable v0.1.1 // indirect @@ -65,12 +63,12 @@ require ( github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c // indirect github.com/onsi/ginkgo v1.8.0 // indirect github.com/onsi/gomega v1.5.0 // indirect + github.com/openSUSE/umoci v0.4.4 github.com/opencontainers/go-digest v1.0.0-rc1 - github.com/opencontainers/image-spec v0.0.0-20180411145040-e562b0440392 - github.com/opencontainers/image-tools v0.0.0-20180129025323-c95f76cbae74 + github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/runc v0.1.1 // indirect - github.com/opencontainers/runtime-spec v0.0.0-20180913141938-5806c3563733 - github.com/opencontainers/runtime-tools v0.6.0 + github.com/opencontainers/runtime-spec v1.0.0 + 
github.com/opencontainers/runtime-tools v0.7.0 github.com/opencontainers/selinux v1.0.0-rc1 github.com/pelletier/go-toml v1.2.0 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 // indirect @@ -83,7 +81,6 @@ require ( github.com/safchain/ethtool v0.0.0-20180504150752-6e3f4faa84e1 // indirect github.com/satori/go.uuid v1.2.0 github.com/seccomp/libseccomp-golang v0.9.0 - github.com/sirupsen/logrus v1.0.5 // indirect github.com/spf13/cobra v0.0.0-20190321000552-67fc4837d267 github.com/spf13/pflag v1.0.3 github.com/stevvooe/resumable v0.0.0-20180830230917-22b14a53ba50 // indirect @@ -92,17 +89,12 @@ require ( github.com/sylabs/scs-key-client v0.3.0-0.20190509220229-bce3b050c4ec github.com/sylabs/scs-library-client v0.2.2 github.com/sylabs/sif v1.0.5 - github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e // indirect github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6 // indirect github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f // indirect github.com/xenolf/lego v2.5.0+incompatible // indirect github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 // indirect github.com/yvasiyarov/gorelic v0.0.6 // indirect github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 // indirect - go4.org v0.0.0-20180417224846-9599cf28b011 // indirect golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect @@ -119,6 +111,8 @@ require ( replace ( github.com/Sirupsen/logrus => github.com/sirupsen/logrus v1.0.5 - github.com/opencontainers/image-tools => github.com/sylabs/image-tools v0.0.0-20181006203805-2814f4980568 + 
github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v0.0.0-20180411145040-e562b0440392 + github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v0.0.0-20180913141938-5806c3563733 golang.org/x/crypto => github.com/sylabs/golang-x-crypto v0.0.0-20181006204705-4bce89e8e9a9 + ) diff --git a/go.sum b/go.sum index e480a5661a..e2b3515d26 100644 --- a/go.sum +++ b/go.sum @@ -9,6 +9,8 @@ github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b h1:sSQK05nvxs4Uk github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/alexflint/go-filemutex v0.0.0-20171028004239-d358565f3c3f h1:tbgFqBK8r77y+mT2RKkQ8ukhk/uvPtPZvr3a3166YNw= github.com/alexflint/go-filemutex v0.0.0-20171028004239-d358565f3c3f/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/apex/log v1.1.0 h1:J5rld6WVFi6NxA6m8GJ1LJqu3+GiTFIt3mYv27gdQWI= +github.com/apex/log v1.1.0/go.mod h1:yA770aXIDQrhVOIGurT/pVdfCpSq1GQV/auzMN5fzvY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -50,6 +52,8 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5t github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 
v0.0.0-20170904100407-a1d1b6c41b1c h1:Xo2rK1pzOm0jO6abTPIQwbAmqBIOj132otexc1mmzFc= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v0.0.0-20180611075603-e61299896203 h1:2/TaU1mJO2o4BTLnqz6KxJxe7ektbzoU11yqa8k6N9Y= @@ -76,9 +80,6 @@ github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= -github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= -github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -113,13 +114,13 @@ github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 
h1:Rem2+U35z1QtPQc6r+WolF7yXiefXqDKyk+lN2pE164= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874 h1:em+tTnzgU7N22woTBMcSJAOW7tRHAkK597W+MD/CpK8= -github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 h1:j30noezaCfvNLcdMYSvHLv81DxYRSt1grlpseG67vhU= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -139,8 +140,16 @@ github.com/juju/testing v0.0.0-20190613124551-e81189438503 h1:ZUgTbk8oHgP0jpMiei github.com/juju/testing v0.0.0-20190613124551-e81189438503/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid 
v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v0.0.0-20170402124221-0bf5dcad4ada h1:ZHhgRyr+9LYwfuWChpSTCCe/07V26LEElTKUXj+2fAg= +github.com/klauspost/pgzip v0.0.0-20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -153,8 +162,10 @@ github.com/kubernetes-sigs/cri-o v0.0.0-20180917213123-8afc34092907/go.mod h1:OU github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o= @@ -165,6 +176,8 @@ 
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb h1:e+l77LJOEqXTIQihQJVkA6ZxPOUmfPM5e4H7rcpgtSk= +github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -172,16 +185,22 @@ github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openSUSE/umoci v0.4.4 h1:CNwlje61gxLf8Yg8wvfFFURN2a9UtczO+6HQS9x+r+s= +github.com/openSUSE/umoci v0.4.4/go.mod h1:WC0knmZfXsEOZyJUv/k3zDOCzjyaFTGMvooGTe4NzIw= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v0.0.0-20180411145040-e562b0440392 h1:rBwY4zl6Rvzh0RyFbELnswKxVfiq7xB/d2sfgy3PmHI= github.com/opencontainers/image-spec v0.0.0-20180411145040-e562b0440392/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.0 h1:jcw3cCH887bLKETGYpv8afogdYchbShR0eH6oD9d5PQ= 
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v0.0.0-20180913141938-5806c3563733 h1:3g+PhOUU7d+gimwdmJU++EIKFzWUASEviZHhpOt/Zvw= github.com/opencontainers/runtime-spec v0.0.0-20180913141938-5806c3563733/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.6.0 h1:NYIDT9KoSAIfmXpMrulp/j+64c8OBb2l19u3vmOl4so= -github.com/opencontainers/runtime-tools v0.6.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg= +github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.7.0 h1:MIjqgwi4ZC+eVNGiYotCUYuTfs/oWDEcigK9Ra5ruHU= +github.com/opencontainers/runtime-tools v0.7.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.0.0-rc1 h1:Q70KvmpJSrYzryl/d0tC3vWUiTn23cSdStKodlokEPs= github.com/opencontainers/selinux v1.0.0-rc1/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= @@ -202,6 +221,8 @@ github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be h1:MoyXp/VjXUwM0GyDcdwT7Ubea2gxOSHpPaFo3qV+Y2A= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rootless-containers/proto v0.1.0 h1:gS1JOMEtk1YDYHCzBAf/url+olMJbac7MTrgSeP6zh4= +github.com/rootless-containers/proto v0.1.0/go.mod 
h1:vgkUFZbQd0gcE/K/ZwtE4MYjZPu0UNHLXIQxhyqAFh8= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/safchain/ethtool v0.0.0-20180504150752-6e3f4faa84e1 h1:JG9rY54/XvEELn4yQGdW56Z+w60wWdZ2UD/WRDFF7lI= @@ -210,8 +231,8 @@ github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/seccomp/libseccomp-golang v0.9.0 h1:S1pmhdFh5spQtVojA+4GUdWBqvI8ydYHxrx8iR6xN8o= github.com/seccomp/libseccomp-golang v0.9.0/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.0-20190321000552-67fc4837d267 h1:k5wQOOB9mm6XdgwnTGopZG83by3g8A2MJ7LvrP3h+/0= @@ -226,8 +247,6 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/sylabs/golang-x-crypto v0.0.0-20181006204705-4bce89e8e9a9 h1:OjtuUh4ZvQpHdwDHOgi8LM0skj8imSc2Hz6966oGxKY= github.com/sylabs/golang-x-crypto v0.0.0-20181006204705-4bce89e8e9a9/go.mod h1:Qf7xZmhvuwq9Hq4LdNLS4xabRQkPJSvEP3Bh4UFG0v4= -github.com/sylabs/image-tools v0.0.0-20181006203805-2814f4980568 h1:Bv8RD7DVhhvYw31BJbw2vhUie1jqHmRHjcypRtroG6k= -github.com/sylabs/image-tools v0.0.0-20181006203805-2814f4980568/go.mod 
h1:1CO+05HLIlepCW8AZHGumlYfh/6mOa6puEIt1Yv0aUM= github.com/sylabs/json-resp v0.5.0 h1:AWdKu6aS0WrkkltX1M0ex0lENrIcx5TISox902s2L2M= github.com/sylabs/json-resp v0.5.0/go.mod h1:anCzED2SGHHZQDubMuoVtwMuJZdpqQ+7iso8yDFm/nQ= github.com/sylabs/scs-build-client v0.0.4 h1:y1rer0Mq+GyAFPKn0szpayJfUSTD7SGgRB3Yh0dJo+g= @@ -241,7 +260,10 @@ github.com/sylabs/sif v1.0.5/go.mod h1:YSrurscNfLqBihRfK4gWYv22UThLcwK76nXg3fLP4 github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e h1:QjF5rxNgRSLHJDwKUvfYP3qOx1vTDzUi/+oSC8FXnCI= github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vbatts/go-mtree v0.4.3 h1:IC2s9EpogK3QzU+VsfuEdM7POkwnW43XDGAWO2Rb1Bo= +github.com/vbatts/go-mtree v0.4.3/go.mod h1:3sazBqLG4bZYmgRTgdh9X3iKTzwBpp5CrREJDzrNSXY= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6 h1:YqlaLDyh/+jUHgh83iNy8KiCvD4LeqnSS5U5a9cgYqU= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 h1:J9gO8RJCAFlln1jsvRba/CWVUnMHwObklfxxjErl1uk= @@ -250,8 +272,8 @@ github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod 
h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f h1:mvXjJIHRZyhNuGassLTcXTwjiWq7NmjdavZsUnmFybQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v0.0.0-20180719132039-b84684d0e066 h1:iBmpEMJZETMKCupjL9Q7X3Q5utIRnWGbls0TXTgD7JI= +github.com/xeipuuv/gojsonschema v0.0.0-20180719132039-b84684d0e066/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xenolf/lego v2.5.0+incompatible h1:vjkBSqBww+pMeQgH/VjbVZPP+qccOmNE82TgC4CO8cI= github.com/xenolf/lego v2.5.0+incompatible/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -261,9 +283,8 @@ github.com/yvasiyarov/gorelic v0.0.6 h1:qMJQYPNdtJ7UNYHjX38KXZtltKTqimMuoQjNnSVI github.com/yvasiyarov/gorelic v0.0.6/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 h1:AsFN8kXcCVkUFHyuzp1FtYbzp1nCO/H6+1uPSGEyPzM= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -go4.org v0.0.0-20180417224846-9599cf28b011 h1:i0QTVNl3j6yciHiQIHxz+mnsSQqo/xi78EGN7yNpMVw= -go4.org v0.0.0-20180417224846-9599cf28b011/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180801234040-f4c29de78a2a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a 
h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= @@ -272,6 +293,7 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180801221139-3dc4335d56c7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/internal/pkg/build/sources/conveyorPacker_oci.go b/internal/pkg/build/sources/conveyorPacker_oci.go index 8e936bb3f2..a9e1bde2fe 100644 --- a/internal/pkg/build/sources/conveyorPacker_oci.go +++ b/internal/pkg/build/sources/conveyorPacker_oci.go @@ -27,8 +27,10 @@ import ( oci "github.com/containers/image/oci/layout" "github.com/containers/image/signature" "github.com/containers/image/types" + "github.com/openSUSE/umoci" + umocilayer "github.com/openSUSE/umoci/oci/layer" + "github.com/openSUSE/umoci/pkg/idtools" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - imagetools "github.com/opencontainers/image-tools/image" ociclient "github.com/sylabs/singularity/internal/pkg/client/oci" "github.com/sylabs/singularity/internal/pkg/sylog" "github.com/sylabs/singularity/internal/pkg/util/shell" @@ -289,9 +291,47 @@ func (cp *OCIConveyorPacker) extractArchive(src string, dst string) error { } func (cp *OCIConveyorPacker) unpackTmpfs() (err error) { - refs := []string{"name=tmp"} - err = imagetools.UnpackLayout(cp.b.Path, cp.b.Rootfs(), "amd64", refs) - return err + var 
mapOptions umocilayer.MapOptions + + // Allow unpacking as non-root + if os.Geteuid() != 0 { + mapOptions.Rootless = true + + uidMap, err := idtools.ParseMapping(fmt.Sprintf("0:%d:1", os.Geteuid())) + if err != nil { + return fmt.Errorf("failure parsing uidmap: %s", err) + } + mapOptions.UIDMappings = append(mapOptions.UIDMappings, uidMap) + + gidMap, err := idtools.ParseMapping(fmt.Sprintf("0:%d:1", os.Getegid())) + if err != nil { + return fmt.Errorf("failure parsing gidmap: %s", err) + } + mapOptions.GIDMappings = append(mapOptions.GIDMappings, gidMap) + } + + engineExt, err := umoci.OpenLayout(cp.b.Path) + if err != nil { + return fmt.Errorf("Failed to open layout: %s", err) + } + + // Obtain the manifest + imageSource, err := cp.tmpfsRef.NewImageSource(context.Background(), cp.sysCtx) + if err != nil { + return fmt.Errorf("Create image source: %s", err) + } + manifestData, mediaType, err := imageSource.GetManifest(context.Background(), nil) + if err != nil { + return fmt.Errorf("Obtain manifest source: %s", err) + } + if mediaType != imgspecv1.MediaTypeImageManifest { + return fmt.Errorf("Manifest has invalid MIMEtype: %s", mediaType) + } + var manifest imgspecv1.Manifest + json.Unmarshal(manifestData, &manifest) + + // Unpack root filesystem + return umocilayer.UnpackRootfs(context.Background(), engineExt, cp.b.Rootfs(), manifest, &mapOptions) } func (cp *OCIConveyorPacker) insertBaseEnv() (err error) { diff --git a/vendor/github.com/apex/log/LICENSE b/vendor/github.com/apex/log/LICENSE new file mode 100644 index 0000000000..af71800684 --- /dev/null +++ b/vendor/github.com/apex/log/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 TJ Holowaychuk tj@tjholowaychuk.com + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, 
+distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/apex/log/Makefile b/vendor/github.com/apex/log/Makefile new file mode 100644 index 0000000000..f948e88ec1 --- /dev/null +++ b/vendor/github.com/apex/log/Makefile @@ -0,0 +1,2 @@ + +include github.com/tj/make/golang diff --git a/vendor/github.com/apex/log/Readme.md b/vendor/github.com/apex/log/Readme.md new file mode 100644 index 0000000000..4a48b6710d --- /dev/null +++ b/vendor/github.com/apex/log/Readme.md @@ -0,0 +1,29 @@ + +![Structured logging for golang](assets/title.png) + +Package log implements a simple structured logging API inspired by Logrus, designed with centralization in mind. Read more on [Medium](https://medium.com/@tjholowaychuk/apex-log-e8d9627f4a9a#.rav8yhkud). 
+ +## Handlers + +- __cli__ – human-friendly CLI output +- __discard__ – discards all logs +- __es__ – Elasticsearch handler +- __graylog__ – Graylog handler +- __json__ – JSON output handler +- __kinesis__ – AWS Kinesis handler +- __level__ – level filter handler +- __logfmt__ – logfmt plain-text formatter +- __memory__ – in-memory handler for tests +- __multi__ – fan-out to multiple handlers +- __papertrail__ – Papertrail handler +- __text__ – human-friendly colored output +- __delta__ – outputs the delta between log calls and spinner + +--- + +[![Build Status](https://semaphoreci.com/api/v1/projects/d8a8b1c0-45b0-4b89-b066-99d788d0b94c/642077/badge.svg)](https://semaphoreci.com/tj/log) +[![GoDoc](https://godoc.org/github.com/apex/log?status.svg)](https://godoc.org/github.com/apex/log) +![](https://img.shields.io/badge/license-MIT-blue.svg) +![](https://img.shields.io/badge/status-stable-green.svg) + + diff --git a/vendor/github.com/apex/log/default.go b/vendor/github.com/apex/log/default.go new file mode 100644 index 0000000000..2213486238 --- /dev/null +++ b/vendor/github.com/apex/log/default.go @@ -0,0 +1,45 @@ +package log + +import ( + "bytes" + "fmt" + "log" + "sort" +) + +// field used for sorting. +type field struct { + Name string + Value interface{} +} + +// by sorts fields by name. +type byName []field + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } + +// handleStdLog outpouts to the stlib log. 
+func handleStdLog(e *Entry) error { + level := levelNames[e.Level] + + var fields []field + + for k, v := range e.Fields { + fields = append(fields, field{k, v}) + } + + sort.Sort(byName(fields)) + + var b bytes.Buffer + fmt.Fprintf(&b, "%5s %-25s", level, e.Message) + + for _, f := range fields { + fmt.Fprintf(&b, " %s=%v", f.Name, f.Value) + } + + log.Println(b.String()) + + return nil +} diff --git a/vendor/github.com/apex/log/doc.go b/vendor/github.com/apex/log/doc.go new file mode 100644 index 0000000000..0331e8e163 --- /dev/null +++ b/vendor/github.com/apex/log/doc.go @@ -0,0 +1,10 @@ +/* +Package log implements a simple structured logging API designed with few assumptions. Designed for +centralized logging solutions such as Kinesis which require encoding and decoding before fanning-out +to handlers. + +You may use this package with inline handlers, much like Logrus, however a centralized solution +is recommended so that apps do not need to be re-deployed to add or remove logging service +providers. +*/ +package log diff --git a/vendor/github.com/apex/log/entry.go b/vendor/github.com/apex/log/entry.go new file mode 100644 index 0000000000..9f0a5e1fda --- /dev/null +++ b/vendor/github.com/apex/log/entry.go @@ -0,0 +1,172 @@ +package log + +import ( + "fmt" + "os" + "strings" + "time" +) + +// assert interface compliance. +var _ Interface = (*Entry)(nil) + +// Now returns the current time. +var Now = time.Now + +// Entry represents a single log entry. +type Entry struct { + Logger *Logger `json:"-"` + Fields Fields `json:"fields"` + Level Level `json:"level"` + Timestamp time.Time `json:"timestamp"` + Message string `json:"message"` + start time.Time + fields []Fields +} + +// NewEntry returns a new entry for `log`. +func NewEntry(log *Logger) *Entry { + return &Entry{ + Logger: log, + } +} + +// WithFields returns a new entry with `fields` set. +func (e *Entry) WithFields(fields Fielder) *Entry { + f := []Fields{} + f = append(f, e.fields...) 
+ f = append(f, fields.Fields()) + return &Entry{ + Logger: e.Logger, + fields: f, + } +} + +// WithField returns a new entry with the `key` and `value` set. +func (e *Entry) WithField(key string, value interface{}) *Entry { + return e.WithFields(Fields{key: value}) +} + +// WithError returns a new entry with the "error" set to `err`. +// +// The given error may implement .Fielder, if it does the method +// will add all its `.Fields()` into the returned entry. +func (e *Entry) WithError(err error) *Entry { + ctx := e.WithField("error", err.Error()) + + if s, ok := err.(stackTracer); ok { + frame := s.StackTrace()[0] + + name := fmt.Sprintf("%n", frame) + file := fmt.Sprintf("%+s", frame) + line := fmt.Sprintf("%d", frame) + + parts := strings.Split(file, "\n\t") + if len(parts) > 1 { + file = parts[1] + } + + ctx = ctx.WithField("source", fmt.Sprintf("%s: %s:%s", name, file, line)) + } + + if f, ok := err.(Fielder); ok { + ctx = ctx.WithFields(f.Fields()) + } + + return ctx +} + +// Debug level message. +func (e *Entry) Debug(msg string) { + e.Logger.log(DebugLevel, e, msg) +} + +// Info level message. +func (e *Entry) Info(msg string) { + e.Logger.log(InfoLevel, e, msg) +} + +// Warn level message. +func (e *Entry) Warn(msg string) { + e.Logger.log(WarnLevel, e, msg) +} + +// Error level message. +func (e *Entry) Error(msg string) { + e.Logger.log(ErrorLevel, e, msg) +} + +// Fatal level message, followed by an exit. +func (e *Entry) Fatal(msg string) { + e.Logger.log(FatalLevel, e, msg) + os.Exit(1) +} + +// Debugf level formatted message. +func (e *Entry) Debugf(msg string, v ...interface{}) { + e.Debug(fmt.Sprintf(msg, v...)) +} + +// Infof level formatted message. +func (e *Entry) Infof(msg string, v ...interface{}) { + e.Info(fmt.Sprintf(msg, v...)) +} + +// Warnf level formatted message. +func (e *Entry) Warnf(msg string, v ...interface{}) { + e.Warn(fmt.Sprintf(msg, v...)) +} + +// Errorf level formatted message. 
+func (e *Entry) Errorf(msg string, v ...interface{}) { + e.Error(fmt.Sprintf(msg, v...)) +} + +// Fatalf level formatted message, followed by an exit. +func (e *Entry) Fatalf(msg string, v ...interface{}) { + e.Fatal(fmt.Sprintf(msg, v...)) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. +func (e *Entry) Trace(msg string) *Entry { + e.Info(msg) + v := e.WithFields(e.Fields) + v.Message = msg + v.start = time.Now() + return v +} + +// Stop should be used with Trace, to fire off the completion message. When +// an `err` is passed the "error" field is set, and the log level is error. +func (e *Entry) Stop(err *error) { + if err == nil || *err == nil { + e.WithField("duration", time.Since(e.start)).Info(e.Message) + } else { + e.WithField("duration", time.Since(e.start)).WithError(*err).Error(e.Message) + } +} + +// mergedFields returns the fields list collapsed into a single map. +func (e *Entry) mergedFields() Fields { + f := Fields{} + + for _, fields := range e.fields { + for k, v := range fields { + f[k] = v + } + } + + return f +} + +// finalize returns a copy of the Entry with Fields merged. +func (e *Entry) finalize(level Level, msg string) *Entry { + return &Entry{ + Logger: e.Logger, + Fields: e.mergedFields(), + Level: level, + Message: msg, + Timestamp: Now(), + } +} diff --git a/vendor/github.com/apex/log/interface.go b/vendor/github.com/apex/log/interface.go new file mode 100644 index 0000000000..c92ebea75b --- /dev/null +++ b/vendor/github.com/apex/log/interface.go @@ -0,0 +1,19 @@ +package log + +// Interface represents the API of both Logger and Entry. 
+type Interface interface { + WithFields(fields Fielder) *Entry + WithField(key string, value interface{}) *Entry + WithError(err error) *Entry + Debug(msg string) + Info(msg string) + Warn(msg string) + Error(msg string) + Fatal(msg string) + Debugf(msg string, v ...interface{}) + Infof(msg string, v ...interface{}) + Warnf(msg string, v ...interface{}) + Errorf(msg string, v ...interface{}) + Fatalf(msg string, v ...interface{}) + Trace(msg string) *Entry +} diff --git a/vendor/github.com/apex/log/levels.go b/vendor/github.com/apex/log/levels.go new file mode 100644 index 0000000000..7d43a43609 --- /dev/null +++ b/vendor/github.com/apex/log/levels.go @@ -0,0 +1,81 @@ +package log + +import ( + "bytes" + "errors" + "strings" +) + +// ErrInvalidLevel is returned if the severity level is invalid. +var ErrInvalidLevel = errors.New("invalid level") + +// Level of severity. +type Level int + +// Log levels. +const ( + InvalidLevel Level = iota - 1 + DebugLevel + InfoLevel + WarnLevel + ErrorLevel + FatalLevel +) + +var levelNames = [...]string{ + DebugLevel: "debug", + InfoLevel: "info", + WarnLevel: "warn", + ErrorLevel: "error", + FatalLevel: "fatal", +} + +var levelStrings = map[string]Level{ + "debug": DebugLevel, + "info": InfoLevel, + "warn": WarnLevel, + "warning": WarnLevel, + "error": ErrorLevel, + "fatal": FatalLevel, +} + +// String implementation. +func (l Level) String() string { + return levelNames[l] +} + +// MarshalJSON implementation. +func (l Level) MarshalJSON() ([]byte, error) { + return []byte(`"` + l.String() + `"`), nil +} + +// UnmarshalJSON implementation. +func (l *Level) UnmarshalJSON(b []byte) error { + v, err := ParseLevel(string(bytes.Trim(b, `"`))) + if err != nil { + return err + } + + *l = v + return nil +} + +// ParseLevel parses level string. 
+func ParseLevel(s string) (Level, error) { + l, ok := levelStrings[strings.ToLower(s)] + if !ok { + return InvalidLevel, ErrInvalidLevel + } + + return l, nil +} + +// MustParseLevel parses level string or panics. +func MustParseLevel(s string) Level { + l, err := ParseLevel(s) + if err != nil { + panic("invalid log level") + } + + return l +} diff --git a/vendor/github.com/apex/log/logger.go b/vendor/github.com/apex/log/logger.go new file mode 100644 index 0000000000..1755747c91 --- /dev/null +++ b/vendor/github.com/apex/log/logger.go @@ -0,0 +1,149 @@ +package log + +import ( + stdlog "log" + "sort" +) + +// assert interface compliance. +var _ Interface = (*Logger)(nil) + +// Fielder is an interface for providing fields to custom types. +type Fielder interface { + Fields() Fields +} + +// Fields represents a map of entry level data used for structured logging. +type Fields map[string]interface{} + +// Fields implements Fielder. +func (f Fields) Fields() Fields { + return f +} + +// Get field value by name. +func (f Fields) Get(name string) interface{} { + return f[name] +} + +// Names returns field names sorted. +func (f Fields) Names() (v []string) { + for k := range f { + v = append(v, k) + } + + sort.Strings(v) + return +} + +// The HandlerFunc type is an adapter to allow the use of ordinary functions as +// log handlers. If f is a function with the appropriate signature, +// HandlerFunc(f) is a Handler object that calls f. +type HandlerFunc func(*Entry) error + +// HandleLog calls f(e). +func (f HandlerFunc) HandleLog(e *Entry) error { + return f(e) +} + +// Handler is used to handle log events, outputting them to +// stdio or sending them to remote services. See the "handlers" +// directory for implementations. +// +// It is left up to Handlers to implement thread-safety. +type Handler interface { + HandleLog(*Entry) error +} + +// Logger represents a logger with configurable Level and Handler. 
+type Logger struct { + Handler Handler + Level Level +} + +// WithFields returns a new entry with `fields` set. +func (l *Logger) WithFields(fields Fielder) *Entry { + return NewEntry(l).WithFields(fields.Fields()) +} + +// WithField returns a new entry with the `key` and `value` set. +// +// Note that the `key` should not have spaces in it - use camel +// case or underscores +func (l *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(l).WithField(key, value) +} + +// WithError returns a new entry with the "error" set to `err`. +func (l *Logger) WithError(err error) *Entry { + return NewEntry(l).WithError(err) +} + +// Debug level message. +func (l *Logger) Debug(msg string) { + NewEntry(l).Debug(msg) +} + +// Info level message. +func (l *Logger) Info(msg string) { + NewEntry(l).Info(msg) +} + +// Warn level message. +func (l *Logger) Warn(msg string) { + NewEntry(l).Warn(msg) +} + +// Error level message. +func (l *Logger) Error(msg string) { + NewEntry(l).Error(msg) +} + +// Fatal level message, followed by an exit. +func (l *Logger) Fatal(msg string) { + NewEntry(l).Fatal(msg) +} + +// Debugf level formatted message. +func (l *Logger) Debugf(msg string, v ...interface{}) { + NewEntry(l).Debugf(msg, v...) +} + +// Infof level formatted message. +func (l *Logger) Infof(msg string, v ...interface{}) { + NewEntry(l).Infof(msg, v...) +} + +// Warnf level formatted message. +func (l *Logger) Warnf(msg string, v ...interface{}) { + NewEntry(l).Warnf(msg, v...) +} + +// Errorf level formatted message. +func (l *Logger) Errorf(msg string, v ...interface{}) { + NewEntry(l).Errorf(msg, v...) +} + +// Fatalf level formatted message, followed by an exit. +func (l *Logger) Fatalf(msg string, v ...interface{}) { + NewEntry(l).Fatalf(msg, v...) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. 
+func (l *Logger) Trace(msg string) *Entry { + return NewEntry(l).Trace(msg) +} + +// log the message, invoking the handler. We clone the entry here +// to bypass the overhead in Entry methods when the level is not +// met. +func (l *Logger) log(level Level, e *Entry, msg string) { + if level < l.Level { + return + } + + if err := l.Handler.HandleLog(e.finalize(level, msg)); err != nil { + stdlog.Printf("error logging: %s", err) + } +} diff --git a/vendor/github.com/apex/log/pkg.go b/vendor/github.com/apex/log/pkg.go new file mode 100644 index 0000000000..9bf51dc8e5 --- /dev/null +++ b/vendor/github.com/apex/log/pkg.go @@ -0,0 +1,100 @@ +package log + +// singletons ftw? +var Log Interface = &Logger{ + Handler: HandlerFunc(handleStdLog), + Level: InfoLevel, +} + +// SetHandler sets the handler. This is not thread-safe. +// The default handler outputs to the stdlib log. +func SetHandler(h Handler) { + if logger, ok := Log.(*Logger); ok { + logger.Handler = h + } +} + +// SetLevel sets the log level. This is not thread-safe. +func SetLevel(l Level) { + if logger, ok := Log.(*Logger); ok { + logger.Level = l + } +} + +// SetLevelFromString sets the log level from a string, panicing when invalid. This is not thread-safe. +func SetLevelFromString(s string) { + if logger, ok := Log.(*Logger); ok { + logger.Level = MustParseLevel(s) + } +} + +// WithFields returns a new entry with `fields` set. +func WithFields(fields Fielder) *Entry { + return Log.WithFields(fields) +} + +// WithField returns a new entry with the `key` and `value` set. +func WithField(key string, value interface{}) *Entry { + return Log.WithField(key, value) +} + +// WithError returns a new entry with the "error" set to `err`. +func WithError(err error) *Entry { + return Log.WithError(err) +} + +// Debug level message. +func Debug(msg string) { + Log.Debug(msg) +} + +// Info level message. +func Info(msg string) { + Log.Info(msg) +} + +// Warn level message. 
+func Warn(msg string) { + Log.Warn(msg) +} + +// Error level message. +func Error(msg string) { + Log.Error(msg) +} + +// Fatal level message, followed by an exit. +func Fatal(msg string) { + Log.Fatal(msg) +} + +// Debugf level formatted message. +func Debugf(msg string, v ...interface{}) { + Log.Debugf(msg, v...) +} + +// Infof level formatted message. +func Infof(msg string, v ...interface{}) { + Log.Infof(msg, v...) +} + +// Warnf level formatted message. +func Warnf(msg string, v ...interface{}) { + Log.Warnf(msg, v...) +} + +// Errorf level formatted message. +func Errorf(msg string, v ...interface{}) { + Log.Errorf(msg, v...) +} + +// Fatalf level formatted message, followed by an exit. +func Fatalf(msg string, v ...interface{}) { + Log.Fatalf(msg, v...) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. +func Trace(msg string) *Entry { + return Log.Trace(msg) +} diff --git a/vendor/github.com/apex/log/stack.go b/vendor/github.com/apex/log/stack.go new file mode 100644 index 0000000000..57efe3262e --- /dev/null +++ b/vendor/github.com/apex/log/stack.go @@ -0,0 +1,8 @@ +package log + +import "github.com/pkg/errors" + +// stackTracer interface. +type stackTracer interface { + StackTrace() errors.StackTrace +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml new file mode 100644 index 0000000000..3938f38349 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -0,0 +1,19 @@ +# Copyright (C) 2017 SUSE LLC. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +language: go +go: + - 1.7.x + - 1.8.x + - tip + +os: + - linux + - osx + +script: + - go test -cover -v ./... 
+ +notifications: + email: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE new file mode 100644 index 0000000000..bec842f294 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -0,0 +1,28 @@ +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md new file mode 100644 index 0000000000..49b2baa9f3 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -0,0 +1,65 @@ +## `filepath-securejoin` ## + +[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin) + +An implementation of `SecureJoin`, a [candidate for inclusion in the Go +standard library][go#20126]. The purpose of this function is to be a "secure" +alternative to `filepath.Join`, and in particular it provides certain +guarantees that are not provided by `filepath.Join`. + +This is the function prototype: + +```go +func SecureJoin(root, unsafePath string) (string, error) +``` + +This library **guarantees** the following: + +* If no error is set, the resulting string **must** be a child path of + `SecureJoin` and will not contain any symlink path components (they will all + be expanded). + +* When expanding symlinks, all symlink path components **must** be resolved + relative to the provided root. In particular, this can be considered a + userspace implementation of how `chroot(2)` operates on file paths. Note that + these symlinks will **not** be expanded lexically (`filepath.Clean` is not + called on the input before processing). + +* Non-existant path components are unaffected by `SecureJoin` (similar to + `filepath.EvalSymlinks`'s semantics). + +* The returned path will always be `filepath.Clean`ed and thus not contain any + `..` components. 
+ +A (trivial) implementation of this function on GNU/Linux systems could be done +with the following (note that this requires root privileges and is far more +opaque than the implementation in this library, and also requires that +`readlink` is inside the `root` path): + +```go +package securejoin + +import ( + "os/exec" + "path/filepath" +) + +func SecureJoin(root, unsafePath string) (string, error) { + unsafePath = string(filepath.Separator) + unsafePath + cmd := exec.Command("chroot", root, + "readlink", "--canonicalize-missing", "--no-newline", unsafePath) + output, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + expanded := string(output) + return filepath.Join(root, expanded), nil +} +``` + +[go#20126]: https://github.com/golang/go/issues/20126 + +### License ### + +The license of this project is the same as Go, which is a BSD 3-clause license +available in the `LICENSE` file. diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION new file mode 100644 index 0000000000..ee1372d33a --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -0,0 +1 @@ +0.2.2 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go new file mode 100644 index 0000000000..c4ca3d7130 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -0,0 +1,134 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin is an implementation of the hopefully-soon-to-be-included +// SecureJoin helper that is meant to be part of the "path/filepath" package. 
+// The purpose of this project is to provide a PoC implementation to make the +// SecureJoin proposal (https://github.com/golang/go/issues/20126) more +// tangible. +package securejoin + +import ( + "bytes" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/pkg/errors" +) + +// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been +// evaluated in attempting to securely join the two given paths. +var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join") + +// IsNotExist tells you if err is an error that implies that either the path +// accessed does not exist (or path components don't exist). This is +// effectively a more broad version of os.IsNotExist. +func IsNotExist(err error) bool { + // If it's a bone-fide ENOENT just bail. + if os.IsNotExist(errors.Cause(err)) { + return true + } + + // Check that it's not actually an ENOTDIR, which in some cases is a more + // convoluted case of ENOENT (usually involving weird paths). + var errno error + switch err := errors.Cause(err).(type) { + case *os.PathError: + errno = err.Err + case *os.LinkError: + errno = err.Err + case *os.SyscallError: + errno = err.Err + } + return errno == syscall.ENOTDIR || errno == syscall.ENOENT +} + +// SecureJoinVFS joins the two given path components (similar to Join) except +// that the returned path is guaranteed to be scoped inside the provided root +// path (when evaluated). Any symbolic links in the path are evaluated with the +// given root treated as the root of the filesystem, similar to a chroot. The +// filesystem state is evaluated through the given VFS interface (if nil, the +// standard os.* family of functions are used). +// +// Note that the guarantees provided by this function only apply if the path +// components in the returned string are not modified (in other words are not +// replaced with symlinks on the filesystem) after this function has returned. +// Such a symlink race is necessarily out-of-scope of SecureJoin. 
+func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // Use the os.* VFS implementation if none was specified. + if vfs == nil { + vfs = osVFS{} + } + + var path bytes.Buffer + n := 0 + for unsafePath != "" { + if n > 255 { + return "", ErrSymlinkLoop + } + + // Next path component, p. + i := strings.IndexRune(unsafePath, filepath.Separator) + var p string + if i == -1 { + p, unsafePath = unsafePath, "" + } else { + p, unsafePath = unsafePath[:i], unsafePath[i+1:] + } + + // Create a cleaned path, using the lexical semantics of /../a, to + // create a "scoped" path component which can safely be joined to fullP + // for evaluation. At this point, path.String() doesn't contain any + // symlink components. + cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) + if cleanP == string(filepath.Separator) { + path.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + // Figure out whether the path is a symlink. + fi, err := vfs.Lstat(fullP) + if err != nil && !IsNotExist(err) { + return "", err + } + // Treat non-existent path components the same as non-symlinks (we + // can't do any better here). + if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { + path.WriteString(p) + path.WriteRune(filepath.Separator) + continue + } + + // Only increment when we actually dereference a link. + n++ + + // It's a symlink, expand it by prepending it to the yet-unparsed path. + dest, err := vfs.Readlink(fullP) + if err != nil { + return "", err + } + // Absolute symlinks reset any work we've already done. + if filepath.IsAbs(dest) { + path.Reset() + } + unsafePath = dest + string(filepath.Separator) + unsafePath + } + + // We have to clean path.String() here because it may contain '..' + // components that are entirely lexical, but would be misleading otherwise. + // And finally do a final clean to ensure that root is also lexically + // clean. 
+ fullP := filepath.Clean(string(filepath.Separator) + path.String()) + return filepath.Clean(root + fullP), nil +} + +// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library +// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +func SecureJoin(root, unsafePath string) (string, error) { + return SecureJoinVFS(root, unsafePath, nil) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf new file mode 100644 index 0000000000..66bb574b95 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf @@ -0,0 +1 @@ +github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go new file mode 100644 index 0000000000..a82a5eae11 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import "os" + +// In future this should be moved into a separate package, because now there +// are several projects (umoci and go-mtree) that are using this sort of +// interface. + +// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is +// equivalent to using the standard os.* family of functions. This is mainly +// used for the purposes of mock testing, but also can be used to otherwise use +// SecureJoin with VFS-like system. +type VFS interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. These semantics are identical to + // os.Lstat. + Lstat(name string) (os.FileInfo, error) + + // Readlink returns the destination of the named symbolic link. 
These + // semantics are identical to os.Readlink. + Readlink(name string) (string, error) +} + +// osVFS is the "nil" VFS, in that it just passes everything through to the os +// module. +type osVFS struct{} + +// Lstat returns a FileInfo describing the named file. If the file is a +// symbolic link, the returned FileInfo describes the symbolic link. Lstat +// makes no attempt to follow the link. These semantics are identical to +// os.Lstat. +func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +// Readlink returns the destination of the named symbolic link. These +// semantics are identical to os.Readlink. +func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md index 1c95f59782..444df08f8e 100644 --- a/vendor/github.com/hashicorp/errwrap/README.md +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -48,7 +48,7 @@ func main() { // We can use the Contains helpers to check if an error contains // another error. It is safe to do this with a nil error, or with // an error that doesn't even use the errwrap package. - if errwrap.Contains(err, ErrNotExist) { + if errwrap.Contains(err, "does not exist") { // Do something } if errwrap.ContainsType(err, new(os.PathError)) { diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go index 6c7a3cc91d..47f13c49a6 100644 --- a/vendor/github.com/hashicorp/go-multierror/format.go +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -13,7 +13,7 @@ type ErrorFormatFunc func([]error) string // that occurred along with a bullet point list of the errors. 
func ListFormatFunc(es []error) string { if len(es) == 1 { - return fmt.Sprintf("1 error occurred:\n\n* %s", es[0]) + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) } points := make([]string, len(es)) @@ -22,6 +22,6 @@ func ListFormatFunc(es []error) string { } return fmt.Sprintf( - "%d errors occurred:\n\n%s", - len(es), strings.Join(points, "\n")) + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) } diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 0000000000..fecb14e81c --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 0000000000..7448756763 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/flate/copy.go b/vendor/github.com/klauspost/compress/flate/copy.go new file mode 100644 index 0000000000..a3200a8f49 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/copy.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// forwardCopy is like the built-in copy function except that it always goes +// forward from the start, even if the dst and src overlap. +// It is equivalent to: +// for i := 0; i < n; i++ { +// mem[dst+i] = mem[src+i] +// } +func forwardCopy(mem []byte, dst, src, n int) { + if dst <= src { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + for { + if dst >= src+n { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + // There is some forward overlap. The destination + // will be filled with a repeated pattern of mem[src:src+k]. + // We copy one instance of the pattern here, then repeat. 
+ // Each time around this loop k will double. + k := dst - src + copy(mem[dst:dst+k], mem[src:src+k]) + n -= k + dst += k + } +} diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go new file mode 100644 index 0000000000..8298d309ae --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go @@ -0,0 +1,42 @@ +//+build !noasm +//+build !appengine +//+build !gccgo + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +import ( + "github.com/klauspost/cpuid" +) + +// crc32sse returns a hash for the first 4 bytes of the slice +// len(a) must be >= 4. +//go:noescape +func crc32sse(a []byte) uint32 + +// crc32sseAll calculates hashes for each 4-byte set in a. +// dst must be east len(a) - 4 in size. +// The size is not checked by the assembly. +//go:noescape +func crc32sseAll(a []byte, dst []uint32) + +// matchLenSSE4 returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. +// +// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions. +// +//go:noescape +func matchLenSSE4(a, b []byte, max int) int + +// histogram accumulates a histogram of b in h. +// h must be at least 256 entries in length, +// and must be cleared before calling this function. +//go:noescape +func histogram(b []byte, h []int32) + +// Detect SSE 4.2 feature. +func init() { + useSSE42 = cpuid.CPU.SSE42() +} diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s new file mode 100644 index 0000000000..a799437270 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s @@ -0,0 +1,214 @@ +//+build !noasm +//+build !appengine +//+build !gccgo + +// Copyright 2015, Klaus Post, see LICENSE for details. 
+ +// func crc32sse(a []byte) uint32 +TEXT ·crc32sse(SB), 4, $0 + MOVQ a+0(FP), R10 + XORQ BX, BX + + // CRC32 dword (R10), EBX + BYTE $0xF2; BYTE $0x41; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0x1a + + MOVL BX, ret+24(FP) + RET + +// func crc32sseAll(a []byte, dst []uint32) +TEXT ·crc32sseAll(SB), 4, $0 + MOVQ a+0(FP), R8 // R8: src + MOVQ a_len+8(FP), R10 // input length + MOVQ dst+24(FP), R9 // R9: dst + SUBQ $4, R10 + JS end + JZ one_crc + MOVQ R10, R13 + SHRQ $2, R10 // len/4 + ANDQ $3, R13 // len&3 + XORQ BX, BX + ADDQ $1, R13 + TESTQ R10, R10 + JZ rem_loop + +crc_loop: + MOVQ (R8), R11 + XORQ BX, BX + XORQ DX, DX + XORQ DI, DI + MOVQ R11, R12 + SHRQ $8, R11 + MOVQ R12, AX + MOVQ R11, CX + SHRQ $16, R12 + SHRQ $16, R11 + MOVQ R12, SI + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + // CRC32 ECX, EDX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd1 + + // CRC32 ESI, EDI + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xfe + MOVL BX, (R9) + MOVL DX, 4(R9) + MOVL DI, 8(R9) + + XORQ BX, BX + MOVL R11, AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + MOVL BX, 12(R9) + + ADDQ $16, R9 + ADDQ $4, R8 + XORQ BX, BX + SUBQ $1, R10 + JNZ crc_loop + +rem_loop: + MOVL (R8), AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + MOVL BX, (R9) + ADDQ $4, R9 + ADDQ $1, R8 + XORQ BX, BX + SUBQ $1, R13 + JNZ rem_loop + +end: + RET + +one_crc: + MOVQ $1, R13 + XORQ BX, BX + JMP rem_loop + +// func matchLenSSE4(a, b []byte, max int) int +TEXT ·matchLenSSE4(SB), 4, $0 + MOVQ a_base+0(FP), SI + MOVQ b_base+24(FP), DI + MOVQ DI, DX + MOVQ max+48(FP), CX + +cmp8: + // As long as we are 8 or more bytes before the end of max, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. 
+ CMPQ CX, $8 + JLT cmp1 + MOVQ (SI), AX + MOVQ (DI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, SI + ADDQ $8, DI + SUBQ $8, CX + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, DI + + // Subtract off &b[0] to convert from &b[ret] to ret, and return. + SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +cmp1: + // In the slices' tail, compare 1 byte at a time. + CMPQ CX, $0 + JEQ matchLenEnd + MOVB (SI), AX + MOVB (DI), BX + CMPB AX, BX + JNE matchLenEnd + ADDQ $1, SI + ADDQ $1, DI + SUBQ $1, CX + JMP cmp1 + +matchLenEnd: + // Subtract off &b[0] to convert from &b[ret] to ret, and return. + SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +// func histogram(b []byte, h []int32) +TEXT ·histogram(SB), 4, $0 + MOVQ b+0(FP), SI // SI: &b + MOVQ b_len+8(FP), R9 // R9: len(b) + MOVQ h+24(FP), DI // DI: Histogram + MOVQ R9, R8 + SHRQ $3, R8 + JZ hist1 + XORQ R11, R11 + +loop_hist8: + MOVQ (SI), R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + INCL (DI)(R10*4) + + ADDQ $8, SI + DECQ R8 + JNZ loop_hist8 + +hist1: + ANDQ $7, R9 + JZ end_hist + XORQ R10, R10 + +loop_hist1: + MOVB (SI), R10 + INCL (DI)(R10*4) + INCQ SI + DECQ R9 + JNZ loop_hist1 + +end_hist: + RET diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go new file mode 100644 index 0000000000..dcf43bd50a --- /dev/null +++ 
b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go @@ -0,0 +1,35 @@ +//+build !amd64 noasm appengine gccgo + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +func init() { + useSSE42 = false +} + +// crc32sse should never be called. +func crc32sse(a []byte) uint32 { + panic("no assembler") +} + +// crc32sseAll should never be called. +func crc32sseAll(a []byte, dst []uint32) { + panic("no assembler") +} + +// matchLenSSE4 should never be called. +func matchLenSSE4(a, b []byte, max int) int { + panic("no assembler") + return 0 +} + +// histogram accumulates a histogram of b in h. +// +// len(h) must be >= 256, and h's elements must be all zeroes. +func histogram(b []byte, h []int32) { + h = h[:256] + for _, t := range b { + h[t]++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 0000000000..9e6e7ff0cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,1353 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. 
+ HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. + + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we put into a single flat block, just too + // stop things from getting too large. + maxFlateBlockTokens = 1 << 14 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 24 + + skipNever = math.MaxInt32 +) + +var useSSE42 bool + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-4 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + // For levels 5-6 we don't bother trying with lazy matches. + // Lazy matching is at least 30% slower, with 1.5% increase. + {6, 0, 12, 8, 12, 5}, + {8, 0, 24, 16, 16, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". 
+ {8, 8, 24, 16, skipNever, 7}, + {10, 16, 24, 64, skipNever, 8}, + {32, 258, 258, 4096, skipNever, 9}, +} + +type compressor struct { + compressionLevel + + w *huffmanBitWriter + bulkHasher func([]byte, []uint32) + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + sync bool // requesting flush + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + chainHead int + hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 + hashOffset int + + // input window: unprocessed data is window[index:windowEnd] + index int + window []byte + windowEnd int + blockStart int // window index where current tokens start + byteAvailable bool // if true, still need to process window[index-1]. + + // queued output tokens + tokens tokens + + // deflate state + length int + offset int + hash uint32 + maxInsertIndex int + err error + ii uint16 // position of last match, intended to overflow to reset. + + snap snappyEnc + hashMatch [maxMatchLength + minMatchLength]uint32 +} + +func (d *compressor) fillDeflate(b []byte) int { + if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + copy(d.window[:], d.window[windowSize:2*windowSize]) + d.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + d.hashOffset += windowSize + if d.hashOffset > maxHashOffset { + delta := d.hashOffset - 1 + d.hashOffset -= delta + d.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). 
+ for i, v := range d.hashPrev[:] { + if int(v) > delta { + d.hashPrev[i] = uint32(int(v) - delta) + } else { + d.hashPrev[i] = 0 + } + } + for i, v := range d.hashHead[:] { + if int(v) > delta { + d.hashHead[i] = uint32(int(v) - delta) + } else { + d.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + d.w.writeBlock(tok.tokens[:tok.n], eof, window) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window) + } + } else { + d.w.writeBlock(tok.tokens[:tok.n], eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only mode, + // use constant or Snappy compression. + switch d.compressionLevel.level { + case 0, 1, 2: + return + } + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. 
+ n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := d.hashMatch[:dstSize] + d.bulkHasher(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + // Update window information. + d.windowEnd += n + d.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. 
+ tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLenSSE4(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. 
+ break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +const hashmul = 0x1e35a7bd + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < minMatchLength { + return + } + hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + dst[0] = (hb * hashmul) >> (32 - hashBits) + end := len(b) - minMatchLength + 1 + for i := 1; i < end; i++ { + hb = (hb << 8) | uint32(b[i+3]) + dst[i] = (hb * hashmul) >> (32 - hashBits) + } +} + +// matchLen returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. +func matchLen(a, b []byte, max int) int { + a = a[:max] + b = b[:len(a)] + for i, av := range a { + if b[i] != av { + return i + } + } + return max +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.hashOffset = 1 + d.length = minMatchLength - 1 + d.offset = 0 + d.byteAvailable = false + d.index = 0 + d.hash = 0 + d.chainHead = -1 + d.bulkHasher = bulkHash4 + if useSSE42 { + d.bulkHasher = crc32sseAll + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazy +func (d *compressor) deflate() { + + // Sanity enables additional runtime tests. 
+ // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. 
If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. + d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. 
+func (d *compressor) deflateLazy() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. 
Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. 
+ if d.ii > 31 { + n := int(d.ii >> 5) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazySSE +func (d *compressor) deflateSSE() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. 
+ const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + + crc32sseAll(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. + d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>5) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazySSE() { + // Sanity enables additional runtime tests. 
+ // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. 
+ d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + crc32sseAll(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. 
+ if d.ii > 31 {
+ n := int(d.ii >> 6)
+ for j := 0; j < n; j++ {
+ if d.index >= d.windowEnd-1 {
+ break
+ }
+
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ d.index++
+ }
+ // Flush last byte
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ d.byteAvailable = false
+ // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ }
+ } else {
+ d.index++
+ d.byteAvailable = true
+ }
+ }
+ }
+}
+
+func (d *compressor) store() {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
+ }
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+ if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+ return
+ }
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+// storeSnappy will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeSnappy() {
+ // We only compress if we have maxStoreBlockSize.
+ if d.windowEnd < maxStoreBlockSize { + if !d.sync { + return + } + // Handle extremely small sizes. + if d.windowEnd < 128 { + if d.windowEnd == 0 { + return + } + if d.windowEnd <= 32 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.tokens.n = 0 + d.windowEnd = 0 + } else { + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + } + d.tokens.n = 0 + d.windowEnd = 0 + d.snap.Reset() + return + } + } + + d.snap.Encode(&d.tokens, d.window[:d.windowEnd]) + // If we made zero matches, store the block as is. + if int(d.tokens.n) == d.windowEnd { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + // If we removed less than 1/16th, huffman compress the block. + } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd]) + d.err = d.w.err + } + d.tokens.n = 0 + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. 
+func (d *compressor) write(b []byte) (n int, err error) {
+ if d.err != nil {
+ return 0, d.err
+ }
+ n = len(b)
+ for len(b) > 0 {
+ d.step(d)
+ b = b[d.fill(d, b):]
+ if d.err != nil {
+ return 0, d.err
+ }
+ }
+ return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+ d.sync = true
+ if d.err != nil {
+ return d.err
+ }
+ d.step(d)
+ if d.err == nil {
+ d.w.writeStoredHeader(0, false)
+ d.w.flush()
+ d.err = d.w.err
+ }
+ d.sync = false
+ return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+ d.w = newHuffmanBitWriter(w)
+
+ switch {
+ case level == NoCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).store
+ case level == ConstantCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeHuff
+ case level >= 1 && level <= 4:
+ d.snap = newSnappy(level)
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeSnappy
+ case level == DefaultCompression:
+ level = 5
+ fallthrough
+ case 5 <= level && level <= 9:
+ d.compressionLevel = levels[level]
+ d.initDeflate()
+ d.fill = (*compressor).fillDeflate
+ if d.fastSkipHashing == skipNever {
+ if useSSE42 {
+ d.step = (*compressor).deflateLazySSE
+ } else {
+ d.step = (*compressor).deflateLazy
+ }
+ } else {
+ if useSSE42 {
+ d.step = (*compressor).deflateSSE
+ } else {
+ d.step = (*compressor).deflate
+
+ }
+ }
+ default:
+ return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+ }
+ return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+ d.w.reset(w)
+ d.sync = false
+ d.err = nil
+ // We only need to reset a few things for Snappy.
+ if d.snap != nil {
+ d.snap.Reset()
+ d.windowEnd = 0
+ d.tokens.n = 0
+ return
+ }
+ switch d.compressionLevel.chain {
+ case 0:
+ // level was NoCompression or ConstantCompression.
+ d.windowEnd = 0 + default: + d.chainHead = -1 + for i := range d.hashHead { + d.hashHead[i] = 0 + } + for i := range d.hashPrev { + d.hashPrev[i] = 0 + } + d.hashOffset = 1 + d.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.n = 0 + d.length = minMatchLength - 1 + d.offset = 0 + d.hash = 0 + d.ii = 0 + d.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. 
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + dw := &dictWriter{w} + zw, err := NewWriter(dw, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +type dictWriter struct { + w io.Writer +} + +func (w *dictWriter) Write(b []byte) (n int, err error) { + return w.w.Write(b) +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. +func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. 
+func (w *Writer) Reset(dst io.Writer) { + if dw, ok := w.d.w.writer.(*dictWriter); ok { + // w was created with NewWriterDict + dw.w = dst + w.d.reset(dw) + w.d.fillWindow(w.dict) + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 0000000000..71c75a065e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// * Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// * Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. 
+// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. +func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. 
+// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. 
This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. +func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go new file mode 100644 index 0000000000..154c89a488 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/gen.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// This program generates fixedhuff.go +// Invoke as +// +// go run gen.go -output fixedhuff.go + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "log" +) + +var filename = flag.String("output", "fixedhuff.go", "output file name") + +const maxCodeLen = 16 + +// Note: the definition of the huffmanDecoder struct is copied from +// inflate.go, as it is private to the implementation. + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. 
However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i] = code + code += count[i] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. + if code != 1< huffmanChunkBits { + numLinks := 1 << (uint(max) - huffmanChunkBits) + h.linkMask = uint32(numLinks - 1) + + // create link tables + link := nextcode[huffmanChunkBits+1] >> 1 + h.links = make([][]uint32, huffmanNumChunks-link) + for j := uint(link); j < huffmanNumChunks; j++ { + reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 + reverse >>= uint(16 - huffmanChunkBits) + off := j - uint(link) + if sanity && h.chunks[reverse] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[reverse] = uint32(off<>8]) | int(reverseByte[code&0xff])<<8 + reverse >>= uint(16 - n) + if n <= huffmanChunkBits { + for off := reverse; off < len(h.chunks); off += 1 << uint(n) { + // We should never need to overwrite + // an existing chunk. Also, 0 is + // never a valid chunk, because the + // lower 4 "count" bits should be + // between 1 and 15. + if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. 
+ panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. + if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +func main() { + flag.Parse() + + var h huffmanDecoder + var bits [288]int + initReverseByte() + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + h.init(bits[:]) + if h.links != nil { + log.Fatal("Unexpected links table in fixed Huffman decoder") + } + + var buf bytes.Buffer + + fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.`+"\n\n") + + fmt.Fprintln(&buf, "package flate") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") + fmt.Fprintf(&buf, "\t%d,\n", h.min) + fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") + for i := 0; i < huffmanNumChunks; i++ { + if i&7 == 0 { + fmt.Fprintf(&buf, "\t\t") + } else { + fmt.Fprintf(&buf, " ") + } + fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) + if i&7 == 7 { + fmt.Fprintln(&buf) + } + } + fmt.Fprintln(&buf, "\t},") + fmt.Fprintln(&buf, "\tnil, 0,") + fmt.Fprintln(&buf, "}") + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } +} + +var reverseByte [256]byte + +func initReverseByte() { + for x := 0; x < 256; x++ { + var result byte + for i := uint(0); i < 8; i++ { + result |= byte(((x >> i) & 1) << (7 - i)) + } + reverseByte[x] = result + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 0000000000..f9b2a699a3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,701 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "io" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. 
+ codegenCodeCount = 19 + badCode = 255 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 240 + + // bufferSize is the actual output byte buffer size. + // It must have additional headroom for a flush + // which can contain up to 8 bytes. + bufferSize = bufferFlushSize + 8 +) + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = []int8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = []uint32{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// offset code word extra bits. +var offsetExtraBits = []int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, +} + +var offsetBase = []uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, + 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, + 0x100000, 0x180000, 0x200000, 0x300000, +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. 
+ // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint + bytes [bufferSize]byte + codegenFreq [codegenCodeCount]int32 + nbytes int + literalFreq []int32 + offsetFreq []int32 + codegen []uint8 + literalEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error +} + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalFreq: make([]int32, maxNumLit), + offsetFreq: make([]int32, offsetCodeCount), + codegen: make([]uint8, maxNumLit+offsetCodeCount+1), + literalEncoding: newHuffmanEncoder(maxNumLit), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.bytes = [bufferSize]byte{} +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint) { + if w.err != nil { + return + } + w.bits |= uint64(b) << w.nbits + w.nbits += nb + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes 
= n + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. + codegen := w.codegen // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. 
+ cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = uint8(litEnc.codes[i].len) + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = uint8(offEnc.codes[i].len) + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +// dynamicSize returns the size of dynamically encoded data in bits. 
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + header := 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7 + size = header + + litEnc.bitLength(w.literalFreq) + + offEnc.bitLength(w.offsetFreq) + + extraBits + + return size, numCodegens +} + +// fixedSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq) + + fixedOffsetEncoding.bitLength(w.offsetFreq) + + extraBits +} + +// storedSize calculates the stored size, including header. +// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + if w.err != nil { + return + } + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +// Write the header of a dynamic Huffman block to the output stream. 
+// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord int = int(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + break + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + break + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + break + } + } +} + +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. +// If the input is nil, the tokens will always be Huffman encoded. 
+func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + // We only bother calculating the costs of the extra bits required by + // the length of offset fields (which will be the same for both fixed + // and dynamic encoding), if we need to compare those two encodings + // against stored encoding. + for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { + // First eight length codes have extra size = 0. + extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) + } + for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { + // First four offset codes have extra size = 0. + extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode]) + } + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = w.fixedSize(extraBits) + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize < size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. 
+ if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) + + // Store bytes, if we don't get a reasonable improvement. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + + // Write the tokens. + w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. 
+func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + for i := range w.offsetFreq { + w.offsetFreq[i] = 0 + } + + for _, t := range tokens { + if t < matchType { + w.literalFreq[t.literal()]++ + continue + } + length := t.length() + offset := t.offset() + w.literalFreq[lengthCodesStart+lengthCode(length)]++ + w.offsetFreq[offsetCode(offset)]++ + } + + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + w.literalEncoding.generate(w.literalFreq, 15) + w.offsetEncoding.generate(w.offsetFreq, 15) + return +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. 
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + for _, t := range tokens { + if t < matchType { + w.writeCode(leCodes[t.literal()]) + continue + } + // Write the length + length := t.length() + lengthCode := lengthCode(length) + w.writeCode(leCodes[lengthCode+lengthCodesStart]) + extraLengthBits := uint(lengthExtraBits[lengthCode]) + if extraLengthBits > 0 { + extraLength := int32(length - lengthBase[lengthCode]) + w.writeBits(extraLength, extraLengthBits) + } + // Write the offset + offset := t.offset() + offsetCode := offsetCode(offset) + w.writeCode(oeCodes[offsetCode]) + extraOffsetBits := uint(offsetExtraBits[offsetCode]) + if extraOffsetBits > 0 { + extraOffset := int32(offset - offsetBase[offsetCode]) + w.writeBits(extraOffset, extraOffsetBits) + } + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq, 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + + // Add everything as literals + histogram(input, w.literalFreq) + + w.literalFreq[endBlockMarker] = 1 + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + w.literalEncoding.generate(w.literalFreq, 15) + + // Figure out smallest code. + // Always use dynamic Huffman or Store + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. 
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0) + + // Store bytes, if we don't get a reasonable improvement. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + encoding := w.literalEncoding.codes[:257] + n := w.nbytes + for _, t := range input { + // Bitwriting inlined, ~30% speedup + c := encoding[t] + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits < 48 { + continue + } + // Store 6 bytes + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n < bufferFlushSize { + continue + } + w.write(w.bytes[:n]) + if w.err != nil { + return // Return early in the event of write failures + } + n = 0 + } + w.nbytes = n + w.writeCode(encoding[endBlockMarker]) +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go new file mode 100644 index 0000000000..bdcbd823b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -0,0 +1,344 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "math" + "sort" +) + +// hcode is a huffman code with a bit code and bit length. 
+type hcode struct { + code, len uint16 +} + +type huffmanEncoder struct { + codes []hcode + freqcache []literalNode + bitCount [17]int32 + lns byLiteral // stored to avoid repeated allocation in generate + lfs byFreq // stored to avoid repeated allocation in generate +} + +type literalNode struct { + literal uint16 + freq int32 +} + +// A levelInfo describes the state of the constructed tree for a given depth. +type levelInfo struct { + // Our level. for better printing + level int32 + + // The frequency of the last node at this level + lastFreq int32 + + // The frequency of the next character to add to this level + nextCharFreq int32 + + // The frequency of the next pair (from level below) to add to this level. + // Only valid if the "needed" value of the next lower level is 0. + nextPairFreq int32 + + // The number of chains remaining to generate for this level before moving + // up to the next level + needed int32 +} + +// set sets the code and length of an hcode. +func (h *hcode) set(code uint16, length uint16) { + h.len = length + h.code = code +} + +func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } + +func newHuffmanEncoder(size int) *huffmanEncoder { + return &huffmanEncoder{codes: make([]hcode, size)} +} + +// Generates a HuffmanCode corresponding to the fixed literal table +func generateFixedLiteralEncoding() *huffmanEncoder { + h := newHuffmanEncoder(maxNumLit) + codes := h.codes + var ch uint16 + for ch = 0; ch < maxNumLit; ch++ { + var bits uint16 + var size uint16 + switch { + case ch < 144: + // size 8, 000110000 .. 10111111 + bits = ch + 48 + size = 8 + break + case ch < 256: + // size 9, 110010000 .. 111111111 + bits = ch + 400 - 144 + size = 9 + break + case ch < 280: + // size 7, 0000000 .. 0010111 + bits = ch - 256 + size = 7 + break + default: + // size 8, 11000000 .. 
11000111 + bits = ch + 192 - 280 + size = 8 + } + codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + } + return h +} + +func generateFixedOffsetEncoding() *huffmanEncoder { + h := newHuffmanEncoder(30) + codes := h.codes + for ch := range codes { + codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5} + } + return h +} + +var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() +var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() + +func (h *huffmanEncoder) bitLength(freq []int32) int { + var total int + for i, f := range freq { + if f != 0 { + total += int(f) * int(h.codes[i].len) + } + } + return total +} + +const maxBitsLimit = 16 + +// Return the number of literals assigned to each bit size in the Huffman encoding +// +// This method is only called when list.length >= 3 +// The cases of 0, 1, and 2 literals are handled by special case code. +// +// list An array of the literals with non-zero frequencies +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// maxBits The maximum number of bits that should be used to encode any literal. +// Must be less than 16. +// return An integer array in which array[i] indicates the number of literals +// that should be encoded in i bits. +func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { + if maxBits >= maxBitsLimit { + panic("flate: maxBits too large") + } + n := int32(len(list)) + list = list[0 : n+1] + list[n] = maxNode() + + // The tree can't have greater depth than n - 1, no matter what. This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. 
+ var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: list[1].freq, + nextCharFreq: list[2].freq, + nextPairFreq: list[0].freq + list[1].freq, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := maxBits + for { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. + leafCounts[level][level] = n + l.nextCharFreq = list[n].freq + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. 
+ copy(leafCounts[level][:level], leafCounts[level-1][:level]) + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. + bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + h.lns.sort(chunk) + for _, node := range chunk { + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. 
+// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []int32, maxBits int32) { + if h.freqcache == nil { + // Allocate a reusable buffer with the longest possible frequency table. + // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit. + // The largest of these is maxNumLit, so we allocate for that case. + h.freqcache = make([]literalNode, maxNumLit+1) + } + list := h.freqcache[:len(freq)+1] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + list[count] = literalNode{} + h.codes[i].len = 0 + } + } + list[len(freq)] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. 
+ h.codes[node.literal].set(uint16(i), 1) + } + return + } + h.lfs.sort(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +type byLiteral []literalNode + +func (s *byLiteral) sort(a []literalNode) { + *s = byLiteral(a) + sort.Sort(s) +} + +func (s byLiteral) Len() int { return len(s) } + +func (s byLiteral) Less(i, j int) bool { + return s[i].literal < s[j].literal +} + +func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type byFreq []literalNode + +func (s *byFreq) sort(a []literalNode) { + *s = byFreq(a) + sort.Sort(s) +} + +func (s byFreq) Len() int { return len(s) } + +func (s byFreq) Less(i, j int) bool { + if s[i].freq == s[j].freq { + return s[i].literal < s[j].literal + } + return s[i].freq < s[j].freq +} + +func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 0000000000..800d0ce9e5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,880 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. +package flate + +import ( + "bufio" + "io" + "math/bits" + "strconv" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxCodeLenMask = 15 // mask for max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. 
+ maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code +) + +// Initialize the fixedHuffmanDecoder only once upon first use. +var fixedOnce sync.Once +var fixedHuffmanDecoder huffmanDecoder + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError int64 + +func (e CorruptInputError) Error() string { + return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10) +} + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +// +// Deprecated: No longer returned. +type ReadError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Read +} + +func (e *ReadError) Error() string { + return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// A WriteError reports an error encountered while writing output. +// +// Deprecated: No longer returned. +type WriteError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Write +} + +func (e *WriteError) Error() string { + return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. 
There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. +// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks *[huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(lengths []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. 
+ const sanity = false + + if h.chunks == nil { + h.chunks = &[huffmanNumChunks]uint32{} + } + if h.min != 0 { + *h = huffmanDecoder{chunks: h.chunks, links: h.links} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range lengths { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n&maxCodeLenMask]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i&maxCodeLenMask] = code + code += count[i&maxCodeLenMask] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
+ if code != 1< huffmanChunkBits { + numLinks := 1 << (uint(max) - huffmanChunkBits) + h.linkMask = uint32(numLinks - 1) + + // create link tables + link := nextcode[huffmanChunkBits+1] >> 1 + if cap(h.links) < huffmanNumChunks-link { + h.links = make([][]uint32, huffmanNumChunks-link) + } else { + h.links = h.links[:huffmanNumChunks-link] + } + for j := uint(link); j < huffmanNumChunks; j++ { + reverse := int(bits.Reverse16(uint16(j))) + reverse >>= uint(16 - huffmanChunkBits) + off := j - uint(link) + if sanity && h.chunks[reverse] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[reverse] = uint32(off<>= uint(16 - n) + if n <= huffmanChunkBits { + for off := reverse; off < len(h.chunks); off += 1 << uint(n) { + // We should never need to overwrite + // an existing chunk. Also, 0 is + // never a valid chunk, because the + // lower 4 "count" bits should be + // between 1 and 15. + if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. + panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. 
+ if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +// The actual read interface needed by NewReader. +// If the passed in io.Reader does not also have ReadByte, +// the NewReader will introduce its own buffering. +type Reader interface { + io.Reader + io.ByteReader +} + +// Decompress state. +type decompressor struct { + // Input source. + r Reader + roffset int64 + + // Input bits, in top of b. + b uint32 + nb uint + + // Huffman decoders for literal/length, distance. + h1, h2 huffmanDecoder + + // Length arrays used to define Huffman codes. + bits *[maxNumLit + maxNumDist]int + codebits *[numCodes]int + + // Output history, buffer. + dict dictDecoder + + // Temporary buffer (avoids repeated allocation). + buf [4]byte + + // Next step in the decompression, + // and decompression state. + step func(*decompressor) + stepState int + final bool + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlock() + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlock() + default: + // 3 is reserved. 
+ f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + f.step(f) + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.step(f) + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. 
+ for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. + var rep int + var nb uint + var b int + switch x { + default: + return InternalError("unexpected length code") + case 16: + rep = 3 + nb = 2 + if i == 0 { + return CorruptInputError(f.roffset) + } + b = f.bits[i-1] + case 17: + rep = 3 + nb = 3 + b = 0 + case 18: + rep = 11 + nb = 7 + b = 0 + } + for f.nb < nb { + if err := f.moreBits(); err != nil { + return err + } + } + rep += int(f.b & uint32(1<>= nb + f.nb -= nb + if i+rep > n { + return CorruptInputError(f.roffset) + } + for j := 0; j < rep; j++ { + f.bits[i] = b + i++ + } + } + + if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { + return CorruptInputError(f.roffset) + } + + // As an optimization, we can initialize the min bits to read at a time + // for the HLIT tree to the length of the EOB marker since we know that + // every block must terminate with one. This preserves the property that + // we never read any extra bytes after the end of the DEFLATE stream. + if f.h1.min < f.bits[endBlockMarker] { + f.h1.min = f.bits[endBlockMarker] + } + + return nil +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. 
+func (f *decompressor) huffmanBlock() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + v, err := f.huffSym(f.hl) + if err != nil { + f.err = err + return + } + var n uint // number of bits extra + var length int + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBlock + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + length += int(f.b & uint32(1<>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << nb + for f.nb < nb { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + extra |= int(f.b & uint32(1<>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBlock // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Copy a single uncompressed data block from input to output. +func (f *decompressor) dataBlock() { + // Uncompressed. + // Discard current half-byte. + f.nb = 0 + f.b = 0 + + // Length then ones-complement of length. + nr, err := io.ReadFull(f.r, f.buf[0:4]) + f.roffset += int64(nr) + if err != nil { + f.err = noEOF(err) + return + } + n := int(f.buf[0]) | int(f.buf[1])<<8 + nn := int(f.buf[2]) | int(f.buf[3])<<8 + if uint16(nn) != uint16(^n) { + f.err = CorruptInputError(f.roffset) + return + } + + if n == 0 { + f.toRead = f.dict.readFlush() + f.finishBlock() + return + } + + f.copyLen = n + f.copyData() +} + +// copyData copies f.copyLen bytes from the underlying reader into f.hist. +// It pauses for reads when f.hist is full. 
+func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + f.err = noEOF(err) + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = (*decompressor).nextBlock +} + +// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. +func noEOF(e error) error { + if e == io.EOF { + return io.ErrUnexpectedEOF + } + return e +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.min) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + return 0, noEOF(err) + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := h.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b = b >> (n & 31) + f.nb = nb - n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. + var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: (*decompressor).nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. 
+func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go new file mode 100644 index 0000000000..c1a02720d1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/reverse_bits.go @@ -0,0 +1,48 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +var reverseByte = [256]byte{ + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +} + +func reverseUint16(v uint16) uint16 { + return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 +} + +func reverseBits(number uint16, bitLength byte) uint16 { + return reverseUint16(number << uint8(16-bitLength)) +} diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go 
b/vendor/github.com/klauspost/compress/flate/snappy.go new file mode 100644 index 0000000000..d853320a75 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/snappy.go @@ -0,0 +1,900 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + ol := int(dst.n) + for i, v := range lit { + dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) + } + dst.n += uint16(len(lit)) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +func emitCopy(dst *tokens, offset, length int) { + dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) + dst.n++ +} + +type snappyEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newSnappy(level int) snappyEnc { + switch level { + case 1: + return &snappyL1{} + case 2: + return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 3: + return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 4: + return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 14 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. 
+ baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset +) + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func hash(u uint32) uint32 { + return (u * 0x1e35a7bd) >> tableShift +} + +// snappyL1 encapsulates level 1 compression +type snappyL1 struct{} + +func (e *snappyL1) Reset() {} + +func (e *snappyL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 16 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Initialize the hash table. + // + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535. + var table [tableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
+ s := 1 + nextHash := hash(load32(src, s)) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS)) + if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. 
We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of Snappy's: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + s1 := base + maxMatchLength + if s1 > len(src) { + s1 = len(src) + } + a := src[s:s1] + b := src[candidate+4:] + b = b[:len(a)] + l := len(a) + for i := range a { + if a[i] != b[i] { + l = i + break + } + } + s += l + + // matchToken is flate's equivalent of Snappy's emitCopy. + dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)) + dst.n++ + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x >> 0)) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x >> 8)) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x >> 16)) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + emitLiteral(dst, src[nextEmit:]) + } +} + +type tableEntry struct { + val uint32 + offset int32 +} + +func load3232(b []byte, i int32) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. 
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// snappyGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type snappyGen struct { + prev []byte + cur int32 +} + +// snappyGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type snappyL2 struct { + snappyGen + table [tableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *snappyL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxStoreBlockSize + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash(now) + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || cv != candidate.val { + // Out of range or not matched. + cv = now + continue + } + break + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. 
+ // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv} + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load6432(src, s-1) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)} + x >>= 8 + currHash := hash(uint32(x)) + candidate = e.table[currHash&tableMask] + e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != candidate.val { + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// snappyL3 +type snappyL3 struct { + snappyGen + table [tableSize]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *snappyL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. 
Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +// snappyL4 +type snappyL4 struct { + snappyL3 +} + +// Encode uses a similar algorithm to level 3, +// but will check up to two candidates if first isn't long enough. +func (e *snappyL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 3 + minNonLiteralBlockSize = 1 + 1 + inputMargin + matchLenGood = 12 + ) + + // Protect against e.cur wraparound. 
+ if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. 
+ skip := int32(32) + + nextS := s + var candidate tableEntry + var candidateAlt tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset < maxMatchOffset { + candidateAlt = candidates.Prev + } + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. 
+ // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + // Try alternative candidate if match length < matchLenGood. + if l < matchLenGood-4 && candidateAlt.offset != 0 { + t2 := candidateAlt.offset - e.cur + 4 + l2 := e.matchlen(s, t2, src) + if l2 > l { + l = l2 + t = t2 + } + } + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + candidateAlt = tableEntry{} + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset <= maxMatchOffset { + candidateAlt = candidates.Prev + } + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +func (e *snappyGen) matchlen(s, t int32, src []byte) int32 { + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // If we are inside the current block + if t >= 0 { + b := src[t:] + a := src[s:s1] + b = b[:len(a)] + // Extend the match to be as long as possible. 
+ for i := range a { + if a[i] != b[i] { + return int32(i) + } + } + return int32(len(a)) + } + + // We found a match in the previous block. + tp := int32(len(e.prev)) + t + if tp < 0 { + return 0 + } + + // Extend the match to be as long as possible. + a := src[s:s1] + b := e.prev[tp:] + if len(b) > len(a) { + b = b[:len(a)] + } + a = a[:len(b)] + for i := range b { + if a[i] != b[i] { + return int32(i) + } + } + + // If we reached our limit, we matched everything we are + // allowed to in the previous block and we return. + n := int32(len(b)) + if int(s+n) == s1 { + return n + } + + // Continue looking for more matches in the current block. + a = src[s+n : s1] + b = src[:len(a)] + for i := range a { + if a[i] != b[i] { + return int32(i) + n + } + } + return int32(len(a)) + n +} + +// Reset the encoding table. +func (e *snappyGen) Reset() { + e.prev = e.prev[:0] + e.cur += maxMatchOffset +} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go new file mode 100644 index 0000000000..4f275ea61d --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -0,0 +1,115 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import "fmt" + +const ( + // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused + // 8 bits: xlength = length - MIN_MATCH_LENGTH + // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + lengthShift = 22 + offsetMask = 1< pair into a match token. 
+func matchToken(xlength uint32, xoffset uint32) token { + return token(matchType + xlength< maxMatchLength || xoffset > maxMatchOffset { + panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset)) + return token(matchType) + } + return token(matchType + xlength<> lengthShift) } + +func lengthCode(len uint32) uint32 { return lengthCodes[len] } + +// Returns the offset code corresponding to a specific offset +func offsetCode(off uint32) uint32 { + if off < uint32(len(offsetCodes)) { + return offsetCodes[off] + } else if off>>7 < uint32(len(offsetCodes)) { + return offsetCodes[off>>7] + 14 + } else { + return offsetCodes[off>>14] + 28 + } +} diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml new file mode 100644 index 0000000000..630192d597 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/.travis.yml @@ -0,0 +1,23 @@ +language: go + +sudo: false + +os: + - linux + - osx +go: + - 1.8.x + - 1.9.x + - 1.10.x + - master + +script: + - go vet ./... + - go test -v ./... + - go test -race ./... + - diff <(gofmt -d .) 
<("") + +matrix: + allow_failures: + - go: 'master' + fast_finish: true diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt new file mode 100644 index 0000000000..452d28eda8 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt @@ -0,0 +1,35 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. 
diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE new file mode 100644 index 0000000000..5cec7ee949 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md new file mode 100644 index 0000000000..b2b6bee879 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/README.md @@ -0,0 +1,145 @@ +# cpuid +Package cpuid provides information about the CPU running the current program. + +CPU features are detected on startup, and kept for fast access through the life of the application. +Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. + +You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
+ +Package home: https://github.com/klauspost/cpuid + +[![GoDoc][1]][2] [![Build Status][3]][4] + +[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg +[2]: https://godoc.org/github.com/klauspost/cpuid +[3]: https://travis-ci.org/klauspost/cpuid.svg +[4]: https://travis-ci.org/klauspost/cpuid + +# features +## CPU Instructions +* **CMOV** (i686 CMOV) +* **NX** (NX (No-Execute) bit) +* **AMD3DNOW** (AMD 3DNOW) +* **AMD3DNOWEXT** (AMD 3DNowExt) +* **MMX** (standard MMX) +* **MMXEXT** (SSE integer functions or AMD MMX ext) +* **SSE** (SSE functions) +* **SSE2** (P4 SSE functions) +* **SSE3** (Prescott SSE3 functions) +* **SSSE3** (Conroe SSSE3 functions) +* **SSE4** (Penryn SSE4.1 functions) +* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) +* **SSE42** (Nehalem SSE4.2 functions) +* **AVX** (AVX functions) +* **AVX2** (AVX2 functions) +* **FMA3** (Intel FMA 3) +* **FMA4** (Bulldozer FMA4 functions) +* **XOP** (Bulldozer XOP functions) +* **F16C** (Half-precision floating-point conversion) +* **BMI1** (Bit Manipulation Instruction Set 1) +* **BMI2** (Bit Manipulation Instruction Set 2) +* **TBM** (AMD Trailing Bit Manipulation) +* **LZCNT** (LZCNT instruction) +* **POPCNT** (POPCNT instruction) +* **AESNI** (Advanced Encryption Standard New Instructions) +* **CLMUL** (Carry-less Multiplication) +* **HTT** (Hyperthreading (enabled)) +* **HLE** (Hardware Lock Elision) +* **RTM** (Restricted Transactional Memory) +* **RDRAND** (RDRAND instruction is available) +* **RDSEED** (RDSEED instruction is available) +* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +* **SHA** (Intel SHA Extensions) +* **AVX512F** (AVX-512 Foundation) +* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) +* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) +* **AVX512PF** (AVX-512 Prefetch Instructions) +* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) +* **AVX512CD** (AVX-512 Conflict Detection Instructions) 
+* **AVX512BW** (AVX-512 Byte and Word Instructions) +* **AVX512VL** (AVX-512 Vector Length Extensions) +* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) +* **MPX** (Intel MPX (Memory Protection Extensions)) +* **ERMS** (Enhanced REP MOVSB/STOSB) +* **RDTSCP** (RDTSCP Instruction) +* **CX16** (CMPXCHG16B Instruction) +* **SGX** (Software Guard Extensions, with activation details) + +## Performance +* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. +* **SSE2SLOW** (SSE2 is supported, but usually not faster) +* **SSE3SLOW** (SSE3 is supported, but usually not faster) +* **ATOM** (Atom processor, some SSSE3 instructions are slower) +* **Cache line** (Probable size of a cache line). +* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. + +## Cpu Vendor/VM +* **Intel** +* **AMD** +* **VIA** +* **Transmeta** +* **NSC** +* **KVM** (Kernel-based Virtual Machine) +* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) +* **VMware** +* **XenHVM** + +# installing + +```go get github.com/klauspost/cpuid``` + +# example + +```Go +package main + +import ( + "fmt" + "github.com/klauspost/cpuid" +) + +func main() { + // Print basic CPU information: + fmt.Println("Name:", cpuid.CPU.BrandName) + fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores) + fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) + fmt.Println("Features:", cpuid.CPU.Features) + fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) + fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") + fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes") + fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") + fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") + + // Test if we have a specific feature: + if cpuid.CPU.SSE() { + fmt.Println("We have Streaming SIMD Extensions") + } +} +``` + +Sample output: +``` +>go run main.go +Name: 
Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz +PhysicalCores: 2 +ThreadsPerCore: 2 +LogicalCores: 4 +Family 6 Model: 42 +Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL +Cacheline bytes: 64 +We have Streaming SIMD Extensions +``` + +# private package + +In the "private" folder you can find an autogenerated version of the library you can include in your own packages. + +For this purpose all exports are removed, and functions and constants are lowercased. + +This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. + +# license + +This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go new file mode 100644 index 0000000000..60c681bed2 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid.go @@ -0,0 +1,1040 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// Package cpuid provides information about the CPU running the current program. +// +// CPU features are detected on startup, and kept for fast access through the life of the application. +// Currently x86 / x64 (AMD64) is supported. +// +// You can access the CPU information by accessing the shared CPU variable of the cpuid library. +// +// Package home: https://github.com/klauspost/cpuid +package cpuid + +import "strings" + +// Vendor is a representation of a CPU vendor. 
+type Vendor int + +const ( + Other Vendor = iota + Intel + AMD + VIA + Transmeta + NSC + KVM // Kernel-based Virtual Machine + MSVM // Microsoft Hyper-V or Windows Virtual PC + VMware + XenHVM +) + +const ( + CMOV = 1 << iota // i686 CMOV + NX // NX (No-Execute) bit + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSSE3 // Conroe SSSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSE42 // Nehalem SSE4.2 functions + AVX // AVX functions + AVX2 // AVX2 functions + FMA3 // Intel FMA 3 + FMA4 // Bulldozer FMA4 functions + XOP // Bulldozer XOP functions + F16C // Half-precision floating-point conversion + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + TBM // AMD Trailing Bit Manipulation + LZCNT // LZCNT instruction + POPCNT // POPCNT instruction + AESNI // Advanced Encryption Standard New Instructions + CLMUL // Carry-less Multiplication + HTT // Hyperthreading (enabled) + HLE // Hardware Lock Elision + RTM // Restricted Transactional Memory + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA // Intel SHA Extensions + AVX512F // AVX-512 Foundation + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512BW // AVX-512 Byte and Word Instructions + AVX512VL // AVX-512 Vector Length Extensions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + MPX // Intel MPX (Memory Protection Extensions) + ERMS // Enhanced REP MOVSB/STOSB + RDTSCP // RDTSCP Instruction + 
CX16 // CMPXCHG16B Instruction + SGX // Software Guard Extensions + IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + STIBP // Single Thread Indirect Branch Predictors + + // Performance indicators + SSE2SLOW // SSE2 is supported, but usually not faster + SSE3SLOW // SSE3 is supported, but usually not faster + ATOM // Atom processor, some SSSE3 instructions are slower +) + +var flagNames = map[Flags]string{ + CMOV: "CMOV", // i686 CMOV + NX: "NX", // NX (No-Execute) bit + AMD3DNOW: "AMD3DNOW", // AMD 3DNOW + AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt + MMX: "MMX", // Standard MMX + MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext + SSE: "SSE", // SSE functions + SSE2: "SSE2", // P4 SSE2 functions + SSE3: "SSE3", // Prescott SSE3 functions + SSSE3: "SSSE3", // Conroe SSSE3 functions + SSE4: "SSE4.1", // Penryn SSE4.1 functions + SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions + SSE42: "SSE4.2", // Nehalem SSE4.2 functions + AVX: "AVX", // AVX functions + AVX2: "AVX2", // AVX functions + FMA3: "FMA3", // Intel FMA 3 + FMA4: "FMA4", // Bulldozer FMA4 functions + XOP: "XOP", // Bulldozer XOP functions + F16C: "F16C", // Half-precision floating-point conversion + BMI1: "BMI1", // Bit Manipulation Instruction Set 1 + BMI2: "BMI2", // Bit Manipulation Instruction Set 2 + TBM: "TBM", // AMD Trailing Bit Manipulation + LZCNT: "LZCNT", // LZCNT instruction + POPCNT: "POPCNT", // POPCNT instruction + AESNI: "AESNI", // Advanced Encryption Standard New Instructions + CLMUL: "CLMUL", // Carry-less Multiplication + HTT: "HTT", // Hyperthreading (enabled) + HLE: "HLE", // Hardware Lock Elision + RTM: "RTM", // Restricted Transactional Memory + RDRAND: "RDRAND", // RDRAND instruction is available + RDSEED: "RDSEED", // RDSEED instruction is available + ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA: "SHA", // Intel SHA Extensions + AVX512F: "AVX512F", // AVX-512 
Foundation + AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions + AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions + AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions + AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions + AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions + AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions + MPX: "MPX", // Intel MPX (Memory Protection Extensions) + ERMS: "ERMS", // Enhanced REP MOVSB/STOSB + RDTSCP: "RDTSCP", // RDTSCP Instruction + CX16: "CX16", // CMPXCHG16B Instruction + SGX: "SGX", // Software Guard Extensions + IBPB: "IBPB", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier + STIBP: "STIBP", // Single Thread Indirect Branch Predictors + + // Performance indicators + SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster + SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster + ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower + +} + +// CPUInfo contains information about the detected system CPU. +type CPUInfo struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + Features Flags // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Cache struct { + L1I int // L1 Instruction Cache (per core or shared). 
Will be -1 if undetected + L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected + L2 int // L2 Cache (per core or shared). Will be -1 if undetected + L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected + } + SGX SGXSupport + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data, +// this way queries are +var CPU CPUInfo + +func init() { + initCPU() + Detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. +func Detect() { + CPU.maxFunc = maxFunctionID() + CPU.maxExFunc = maxExtendedFunction() + CPU.BrandName = brandName() + CPU.CacheLine = cacheLine() + CPU.Family, CPU.Model = familyModel() + CPU.Features = support() + CPU.SGX = hasSGX(CPU.Features&SGX != 0) + CPU.ThreadsPerCore = threadsPerCore() + CPU.LogicalCores = logicalCores() + CPU.PhysicalCores = physicalCores() + CPU.VendorID = vendorID() + CPU.cacheSize() +} + +// Generated here: http://play.golang.org/p/BxFH2Gdc0G + +// Cmov indicates support of CMOV instructions +func (c CPUInfo) Cmov() bool { + return c.Features&CMOV != 0 +} + +// Amd3dnow indicates support of AMD 3DNOW! instructions +func (c CPUInfo) Amd3dnow() bool { + return c.Features&AMD3DNOW != 0 +} + +// Amd3dnowExt indicates support of AMD 3DNOW! 
Extended instructions +func (c CPUInfo) Amd3dnowExt() bool { + return c.Features&AMD3DNOWEXT != 0 +} + +// MMX indicates support of MMX instructions +func (c CPUInfo) MMX() bool { + return c.Features&MMX != 0 +} + +// MMXExt indicates support of MMXEXT instructions +// (SSE integer functions or AMD MMX ext) +func (c CPUInfo) MMXExt() bool { + return c.Features&MMXEXT != 0 +} + +// SSE indicates support of SSE instructions +func (c CPUInfo) SSE() bool { + return c.Features&SSE != 0 +} + +// SSE2 indicates support of SSE 2 instructions +func (c CPUInfo) SSE2() bool { + return c.Features&SSE2 != 0 +} + +// SSE3 indicates support of SSE 3 instructions +func (c CPUInfo) SSE3() bool { + return c.Features&SSE3 != 0 +} + +// SSSE3 indicates support of SSSE 3 instructions +func (c CPUInfo) SSSE3() bool { + return c.Features&SSSE3 != 0 +} + +// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions +func (c CPUInfo) SSE4() bool { + return c.Features&SSE4 != 0 +} + +// SSE42 indicates support of SSE4.2 instructions +func (c CPUInfo) SSE42() bool { + return c.Features&SSE42 != 0 +} + +// AVX indicates support of AVX instructions +// and operating system support of AVX instructions +func (c CPUInfo) AVX() bool { + return c.Features&AVX != 0 +} + +// AVX2 indicates support of AVX2 instructions +func (c CPUInfo) AVX2() bool { + return c.Features&AVX2 != 0 +} + +// FMA3 indicates support of FMA3 instructions +func (c CPUInfo) FMA3() bool { + return c.Features&FMA3 != 0 +} + +// FMA4 indicates support of FMA4 instructions +func (c CPUInfo) FMA4() bool { + return c.Features&FMA4 != 0 +} + +// XOP indicates support of XOP instructions +func (c CPUInfo) XOP() bool { + return c.Features&XOP != 0 +} + +// F16C indicates support of F16C instructions +func (c CPUInfo) F16C() bool { + return c.Features&F16C != 0 +} + +// BMI1 indicates support of BMI1 instructions +func (c CPUInfo) BMI1() bool { + return c.Features&BMI1 != 0 +} + +// BMI2 indicates support of BMI2 instructions 
+func (c CPUInfo) BMI2() bool { + return c.Features&BMI2 != 0 +} + +// TBM indicates support of TBM instructions +// (AMD Trailing Bit Manipulation) +func (c CPUInfo) TBM() bool { + return c.Features&TBM != 0 +} + +// Lzcnt indicates support of LZCNT instruction +func (c CPUInfo) Lzcnt() bool { + return c.Features&LZCNT != 0 +} + +// Popcnt indicates support of POPCNT instruction +func (c CPUInfo) Popcnt() bool { + return c.Features&POPCNT != 0 +} + +// HTT indicates the processor has Hyperthreading enabled +func (c CPUInfo) HTT() bool { + return c.Features&HTT != 0 +} + +// SSE2Slow indicates that SSE2 may be slow on this processor +func (c CPUInfo) SSE2Slow() bool { + return c.Features&SSE2SLOW != 0 +} + +// SSE3Slow indicates that SSE3 may be slow on this processor +func (c CPUInfo) SSE3Slow() bool { + return c.Features&SSE3SLOW != 0 +} + +// AesNi indicates support of AES-NI instructions +// (Advanced Encryption Standard New Instructions) +func (c CPUInfo) AesNi() bool { + return c.Features&AESNI != 0 +} + +// Clmul indicates support of CLMUL instructions +// (Carry-less Multiplication) +func (c CPUInfo) Clmul() bool { + return c.Features&CLMUL != 0 +} + +// NX indicates support of NX (No-Execute) bit +func (c CPUInfo) NX() bool { + return c.Features&NX != 0 +} + +// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions +func (c CPUInfo) SSE4A() bool { + return c.Features&SSE4A != 0 +} + +// HLE indicates support of Hardware Lock Elision +func (c CPUInfo) HLE() bool { + return c.Features&HLE != 0 +} + +// RTM indicates support of Restricted Transactional Memory +func (c CPUInfo) RTM() bool { + return c.Features&RTM != 0 +} + +// Rdrand indicates support of RDRAND instruction is available +func (c CPUInfo) Rdrand() bool { + return c.Features&RDRAND != 0 +} + +// Rdseed indicates support of RDSEED instruction is available +func (c CPUInfo) Rdseed() bool { + return c.Features&RDSEED != 0 +} + +// ADX indicates support of Intel ADX 
(Multi-Precision Add-Carry Instruction Extensions) +func (c CPUInfo) ADX() bool { + return c.Features&ADX != 0 +} + +// SHA indicates support of Intel SHA Extensions +func (c CPUInfo) SHA() bool { + return c.Features&SHA != 0 +} + +// AVX512F indicates support of AVX-512 Foundation +func (c CPUInfo) AVX512F() bool { + return c.Features&AVX512F != 0 +} + +// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions +func (c CPUInfo) AVX512DQ() bool { + return c.Features&AVX512DQ != 0 +} + +// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions +func (c CPUInfo) AVX512IFMA() bool { + return c.Features&AVX512IFMA != 0 +} + +// AVX512PF indicates support of AVX-512 Prefetch Instructions +func (c CPUInfo) AVX512PF() bool { + return c.Features&AVX512PF != 0 +} + +// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions +func (c CPUInfo) AVX512ER() bool { + return c.Features&AVX512ER != 0 +} + +// AVX512CD indicates support of AVX-512 Conflict Detection Instructions +func (c CPUInfo) AVX512CD() bool { + return c.Features&AVX512CD != 0 +} + +// AVX512BW indicates support of AVX-512 Byte and Word Instructions +func (c CPUInfo) AVX512BW() bool { + return c.Features&AVX512BW != 0 +} + +// AVX512VL indicates support of AVX-512 Vector Length Extensions +func (c CPUInfo) AVX512VL() bool { + return c.Features&AVX512VL != 0 +} + +// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions +func (c CPUInfo) AVX512VBMI() bool { + return c.Features&AVX512VBMI != 0 +} + +// MPX indicates support of Intel MPX (Memory Protection Extensions) +func (c CPUInfo) MPX() bool { + return c.Features&MPX != 0 +} + +// ERMS indicates support of Enhanced REP MOVSB/STOSB +func (c CPUInfo) ERMS() bool { + return c.Features&ERMS != 0 +} + +// RDTSCP Instruction is available. +func (c CPUInfo) RDTSCP() bool { + return c.Features&RDTSCP != 0 +} + +// CX16 indicates if CMPXCHG16B instruction is available. 
+func (c CPUInfo) CX16() bool { + return c.Features&CX16 != 0 +} + +// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. +// So TSX simply checks that. +func (c CPUInfo) TSX() bool { + return c.Features&(HLE|RTM) == HLE|RTM +} + +// Atom indicates an Atom processor +func (c CPUInfo) Atom() bool { + return c.Features&ATOM != 0 +} + +// Intel returns true if vendor is recognized as Intel +func (c CPUInfo) Intel() bool { + return c.VendorID == Intel +} + +// AMD returns true if vendor is recognized as AMD +func (c CPUInfo) AMD() bool { + return c.VendorID == AMD +} + +// Transmeta returns true if vendor is recognized as Transmeta +func (c CPUInfo) Transmeta() bool { + return c.VendorID == Transmeta +} + +// NSC returns true if vendor is recognized as National Semiconductor +func (c CPUInfo) NSC() bool { + return c.VendorID == NSC +} + +// VIA returns true if vendor is recognized as VIA +func (c CPUInfo) VIA() bool { + return c.VendorID == VIA +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c CPUInfo) RTCounter() uint64 { + if !c.RDTSCP() { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c CPUInfo) Ia32TscAux() uint32 { + if !c.RDTSCP() { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. 
+func (c CPUInfo) LogicalCPU() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. This is only a hint, and will very likely +// have many false negatives. +func (c CPUInfo) VM() bool { + switch c.VendorID { + case MSVM, KVM, VMware, XenHVM: + return true + } + return false +} + +// Flags contains detected cpu features and caracteristics +type Flags uint64 + +// String returns a string representation of the detected +// CPU features. +func (f Flags) String() string { + return strings.Join(f.Strings(), ",") +} + +// Strings returns and array of the detected features. +func (f Flags) Strings() []string { + s := support() + r := make([]string, 0, 20) + for i := uint(0); i < 64; i++ { + key := Flags(1 << i) + val := flagNames[key] + if s&key != 0 { + r = append(r, val) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + if mfi < 0x4 || vendorID() != Intel { + return 1 + } + + if mfi < 0xb { + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + switch vendorID() { + case Intel: + // Use this on old Intel processors + if 
mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case AMD: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (int, int) { + if maxFunctionID() < 0x1 { + return 0, 0 + } + eax, _, _, _ := cpuid(1) + family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) + model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) + return int(family), int(model) +} + +func physicalCores() int { + switch vendorID() { + case Intel: + return logicalCores() / threadsPerCore() + case AMD: + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + return int(c&0xff) + 1 + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]Vendor{ + "AMDisbetter!": AMD, + "AuthenticAMD": AMD, + "CentaurHauls": VIA, + "GenuineIntel": Intel, + "TransmetaCPU": Transmeta, + "GenuineTMx86": Transmeta, + "Geode by NSC": NSC, + "VIA VIA VIA ": VIA, + "KVMKVMKVMKVM": KVM, + "Microsoft Hv": MSVM, + "VMwareVMware": VMware, + "XenVMMXenVMM": XenHVM, +} + +func vendorID() Vendor { + _, b, c, d := cpuid(0) + v := valAsString(b, d, c) + vend, ok := vendorMapping[string(v)] + if !ok { + return Other + } + return vend +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) 
+} + +func (c *CPUInfo) cacheSize() { + c.Cache.L1D = -1 + c.Cache.L1I = -1 + c.Cache.L2 = -1 + c.Cache.L3 = -1 + vendor := vendorID() + switch vendor { + case Intel: + if maxFunctionID() < 4 { + return + } + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.Cache.L1D = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.Cache.L1I = size + } else { + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + case AMD: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) + c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) + } + + return +} + +type SGXSupport struct { + Available bool + SGX1Supported bool + SGX2Supported bool + MaxEnclaveSizeNot64 int64 + MaxEnclaveSize64 int64 +} + +func hasSGX(available bool) (rval SGXSupport) { + rval.Available = available + + if !available { + return + } + + a, _, _, d := cpuidex(0x12, 0) + rval.SGX1Supported = a&0x01 != 0 + rval.SGX2Supported = a&0x02 != 0 + rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 + rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + + return +} + +func support() Flags { + mfi := maxFunctionID() + vend := vendorID() + if mfi < 0x1 { + return 0 + } + rval := uint64(0) + _, _, c, d := cpuid(1) + if (d & (1 << 15)) != 0 { + rval |= CMOV + } + if (d & (1 << 23)) != 0 { 
+ rval |= MMX + } + if (d & (1 << 25)) != 0 { + rval |= MMXEXT + } + if (d & (1 << 25)) != 0 { + rval |= SSE + } + if (d & (1 << 26)) != 0 { + rval |= SSE2 + } + if (c & 1) != 0 { + rval |= SSE3 + } + if (c & 0x00000200) != 0 { + rval |= SSSE3 + } + if (c & 0x00080000) != 0 { + rval |= SSE4 + } + if (c & 0x00100000) != 0 { + rval |= SSE42 + } + if (c & (1 << 25)) != 0 { + rval |= AESNI + } + if (c & (1 << 1)) != 0 { + rval |= CLMUL + } + if c&(1<<23) != 0 { + rval |= POPCNT + } + if c&(1<<30) != 0 { + rval |= RDRAND + } + if c&(1<<29) != 0 { + rval |= F16C + } + if c&(1<<13) != 0 { + rval |= CX16 + } + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + if threadsPerCore() > 1 { + rval |= HTT + } + } + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + rval |= AVX + if (c & 0x00001000) != 0 { + rval |= FMA3 + } + } + } + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. 
+ if mfi >= 7 { + _, ebx, ecx, edx := cpuidex(7, 0) + if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { + rval |= AVX2 + } + if (ebx & 0x00000008) != 0 { + rval |= BMI1 + if (ebx & 0x00000100) != 0 { + rval |= BMI2 + } + } + if ebx&(1<<2) != 0 { + rval |= SGX + } + if ebx&(1<<4) != 0 { + rval |= HLE + } + if ebx&(1<<9) != 0 { + rval |= ERMS + } + if ebx&(1<<11) != 0 { + rval |= RTM + } + if ebx&(1<<14) != 0 { + rval |= MPX + } + if ebx&(1<<18) != 0 { + rval |= RDSEED + } + if ebx&(1<<19) != 0 { + rval |= ADX + } + if ebx&(1<<29) != 0 { + rval |= SHA + } + if edx&(1<<26) != 0 { + rval |= IBPB + } + if edx&(1<<27) != 0 { + rval |= STIBP + } + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). + if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { + if ebx&(1<<16) != 0 { + rval |= AVX512F + } + if ebx&(1<<17) != 0 { + rval |= AVX512DQ + } + if ebx&(1<<21) != 0 { + rval |= AVX512IFMA + } + if ebx&(1<<26) != 0 { + rval |= AVX512PF + } + if ebx&(1<<27) != 0 { + rval |= AVX512ER + } + if ebx&(1<<28) != 0 { + rval |= AVX512CD + } + if ebx&(1<<30) != 0 { + rval |= AVX512BW + } + if ebx&(1<<31) != 0 { + rval |= AVX512VL + } + // ecx + if ecx&(1<<1) != 0 { + rval |= AVX512VBMI + } + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + rval |= LZCNT + rval |= POPCNT + } + if (d & (1 << 31)) != 0 { + rval |= AMD3DNOW + } + if (d & (1 << 30)) != 0 { + rval |= AMD3DNOWEXT + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 22)) != 0 { + rval |= MMXEXT + } + if (c & (1 << 6)) != 0 { + rval |= SSE4A + } + if d&(1<<20) != 0 { + rval |= NX + } + if d&(1<<27) != 0 { + rval |= RDTSCP + } + + /* Allow for selectively disabling 
SSE2 functions on AMD processors + with SSE2 support but not SSE4a. This includes Athlon64, some + Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster + than SSE2 often enough to utilize this special-case flag. + AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case + so that SSE2 is used unless explicitly disabled by checking + AV_CPU_FLAG_SSE2SLOW. */ + if vendorID() != Intel && + rval&SSE2 != 0 && (c&0x00000040) == 0 { + rval |= SSE2SLOW + } + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if (rval & AVX) != 0 { + if (c & 0x00000800) != 0 { + rval |= XOP + } + if (c & 0x00010000) != 0 { + rval |= FMA4 + } + } + + if vendorID() == Intel { + family, model := familyModel() + if family == 6 && (model == 9 || model == 13 || model == 14) { + /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and + * 6/14 (core1 "yonah") theoretically support sse2, but it's + * usually slower than mmx. */ + if (rval & SSE2) != 0 { + rval |= SSE2SLOW + } + if (rval & SSE3) != 0 { + rval |= SSE3SLOW + } + } + /* The Atom processor has SSSE3 support, which is useful in many cases, + * but sometimes the SSSE3 version is slower than the SSE2 equivalent + * on the Atom, but is generally faster on other processors supporting + * SSSE3. This flag allows for selectively disabling certain SSSE3 + * functions on the Atom. 
*/ + if family == 6 && model == 28 { + rval |= ATOM + } + } + } + return Flags(rval) +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s new file mode 100644 index 0000000000..4d731711e4 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_386.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s new file mode 100644 index 0000000000..3c1d60e422 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s @@ -0,0 +1,42 @@ 
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go new file mode 100644 index 0000000000..a5f04dd6d0 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/detect_intel.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// +build 386,!gccgo amd64,!gccgo + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm +} diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go new file mode 100644 index 0000000000..909c5d9a7a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/detect_ref.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build !amd64,!386 gccgo + +package cpuid + +func initCPU() { + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + xgetbv = func(index uint32) (eax, edx uint32) { + return 0, 0 + } + + rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } +} diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go new file mode 100644 index 0000000000..90e7a98d27 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/generate.go @@ -0,0 +1,4 @@ +package cpuid + +//go:generate go run private-gen.go +//go:generate gofmt -w ./private diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go new file mode 100644 index 0000000000..437333d292 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private-gen.go @@ -0,0 +1,476 @@ +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +var inFiles = []string{"cpuid.go", "cpuid_test.go"} +var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", 
"detect_ref.go", "detect_intel.go"} +var fileSet = token.NewFileSet() +var reWrites = []rewrite{ + initRewrite("CPUInfo -> cpuInfo"), + initRewrite("Vendor -> vendor"), + initRewrite("Flags -> flags"), + initRewrite("Detect -> detect"), + initRewrite("CPU -> cpu"), +} +var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, + // cpuid_test.go + "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, +} + +var excludePrefixes = []string{"test", "benchmark"} + +func main() { + Package := "private" + parserMode := parser.ParseComments + exported := make(map[string]rewrite) + for _, file := range inFiles { + in, err := os.Open(file) + if err != nil { + log.Fatalf("opening input", err) + } + + src, err := ioutil.ReadAll(in) + if err != nil { + log.Fatalf("reading input", err) + } + + astfile, err := parser.ParseFile(fileSet, file, src, parserMode) + if err != nil { + log.Fatalf("parsing input", err) + } + + for _, rw := range reWrites { + astfile = rw(astfile) + } + + // Inspect the AST and print all identifiers and literals. 
+ var startDecl token.Pos + var endDecl token.Pos + ast.Inspect(astfile, func(n ast.Node) bool { + var s string + switch x := n.(type) { + case *ast.Ident: + if x.IsExported() { + t := strings.ToLower(x.Name) + for _, pre := range excludePrefixes { + if strings.HasPrefix(t, pre) { + return true + } + } + if excludeNames[t] != true { + //if x.Pos() > startDecl && x.Pos() < endDecl { + exported[x.Name] = initRewrite(x.Name + " -> " + t) + } + } + + case *ast.GenDecl: + if x.Tok == token.CONST && x.Lparen > 0 { + startDecl = x.Lparen + endDecl = x.Rparen + // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) + } + } + if s != "" { + fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) + } + return true + }) + + for _, rw := range exported { + astfile = rw(astfile) + } + + var buf bytes.Buffer + + printer.Fprint(&buf, fileSet, astfile) + + // Remove package documentation and insert information + s := buf.String() + ind := strings.Index(buf.String(), "\npackage cpuid") + s = s[ind:] + s = "// Generated, DO NOT EDIT,\n" + + "// but copy it to your own project and rename the package.\n" + + "// See more at http://github.com/klauspost/cpuid\n" + + s + + outputName := Package + string(os.PathSeparator) + file + + err = ioutil.WriteFile(outputName, []byte(s), 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } + log.Println("Generated", outputName) + } + + for _, file := range copyFiles { + dst := "" + if strings.HasPrefix(file, "cpuid") { + dst = Package + string(os.PathSeparator) + file + } else { + dst = Package + string(os.PathSeparator) + "cpuid_" + file + } + err := copyFile(file, dst) + if err != nil { + log.Fatalf("copying file: %s", err) + } + log.Println("Copied", dst) + } +} + +// CopyFile copies a file from src to dst. If src and dst files exist, and are +// the same, then return success. Copy the file contents from src to dst. 
+func copyFile(src, dst string) (err error) { + sfi, err := os.Stat(src) + if err != nil { + return + } + if !sfi.Mode().IsRegular() { + // cannot copy non-regular files (e.g., directories, + // symlinks, devices, etc.) + return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) + } + dfi, err := os.Stat(dst) + if err != nil { + if !os.IsNotExist(err) { + return + } + } else { + if !(dfi.Mode().IsRegular()) { + return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) + } + if os.SameFile(sfi, dfi) { + return + } + } + err = copyFileContents(src, dst) + return +} + +// copyFileContents copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all it's contents will be replaced by the contents +// of the source file. +func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + if _, err = io.Copy(out, in); err != nil { + return + } + err = out.Sync() + return +} + +type rewrite func(*ast.File) *ast.File + +// Mostly copied from gofmt +func initRewrite(rewriteRule string) rewrite { + f := strings.Split(rewriteRule, "->") + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") + os.Exit(2) + } + pattern := parseExpr(f[0], "pattern") + replace := parseExpr(f[1], "replacement") + return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +} + +// parseExpr parses s as an expression. +// It might make sense to expand this to allow statement patterns, +// but there are problems with preserving formatting and also +// with what a wildcard for a statement looks like. 
+func parseExpr(s, what string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) + os.Exit(2) + } + return x +} + +// Keep this function for debugging. +/* +func dump(msg string, val reflect.Value) { + fmt.Printf("%s:\n", msg) + ast.Print(fileSet, val.Interface()) + fmt.Println() +} +*/ + +// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. +func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { + cmap := ast.NewCommentMap(fileSet, p, p.Comments) + m := make(map[string]reflect.Value) + pat := reflect.ValueOf(pattern) + repl := reflect.ValueOf(replace) + + var rewriteVal func(val reflect.Value) reflect.Value + rewriteVal = func(val reflect.Value) reflect.Value { + // don't bother if val is invalid to start with + if !val.IsValid() { + return reflect.Value{} + } + for k := range m { + delete(m, k) + } + val = apply(rewriteVal, val) + if match(m, pat, val) { + val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) + } + return val + } + + r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) + r.Comments = cmap.Filter(r).Comments() // recreate comments list + return r +} + +// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +func set(x, y reflect.Value) { + // don't bother if x cannot be set or y is invalid + if !x.CanSet() || !y.IsValid() { + return + } + defer func() { + if x := recover(); x != nil { + if s, ok := x.(string); ok && + (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { + // x cannot be set to y - ignore this rewrite + return + } + panic(x) + } + }() + x.Set(y) +} + +// Values/types for special cases. 
+var ( + objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) + scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) + + identType = reflect.TypeOf((*ast.Ident)(nil)) + objectPtrType = reflect.TypeOf((*ast.Object)(nil)) + positionType = reflect.TypeOf(token.NoPos) + callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) + scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +) + +// apply replaces each AST field x in val with f(x), returning val. +// To avoid extra conversions, f operates on the reflect.Value form. +func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { + if !val.IsValid() { + return reflect.Value{} + } + + // *ast.Objects introduce cycles and are likely incorrect after + // rewrite; don't follow them but replace with nil instead + if val.Type() == objectPtrType { + return objectPtrNil + } + + // similarly for scopes: they are likely incorrect after a rewrite; + // replace them with nil + if val.Type() == scopePtrType { + return scopePtrNil + } + + switch v := reflect.Indirect(val); v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + e := v.Index(i) + set(e, f(e)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + e := v.Field(i) + set(e, f(e)) + } + case reflect.Interface: + e := v.Elem() + set(v, f(e)) + } + return val +} + +func isWildcard(s string) bool { + rune, size := utf8.DecodeRuneInString(s) + return size == len(s) && unicode.IsLower(rune) +} + +// match returns true if pattern matches val, +// recording wildcard submatches in m. +// If m == nil, match checks whether pattern == val. +func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { + // Wildcard matches any expression. If it appears multiple + // times in the pattern, it must match the same expression + // each time. 
+ if m != nil && pattern.IsValid() && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) && val.IsValid() { + // wildcards only match valid (non-nil) expressions. + if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { + if old, ok := m[name]; ok { + return match(nil, old, val) + } + m[name] = val + return true + } + } + } + + // Otherwise, pattern and val must match recursively. + if !pattern.IsValid() || !val.IsValid() { + return !pattern.IsValid() && !val.IsValid() + } + if pattern.Type() != val.Type() { + return false + } + + // Special cases. + switch pattern.Type() { + case identType: + // For identifiers, only the names need to match + // (and none of the other *ast.Object information). + // This is a common case, handle it all here instead + // of recursing down any further via reflection. + p := pattern.Interface().(*ast.Ident) + v := val.Interface().(*ast.Ident) + return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name + case objectPtrType, positionType: + // object pointers and token positions always match + return true + case callExprType: + // For calls, the Ellipsis fields (token.Position) must + // match since that is how f(x) and f(x...) are different. + // Check them here but fall through for the remaining fields. 
+ p := pattern.Interface().(*ast.CallExpr) + v := val.Interface().(*ast.CallExpr) + if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { + return false + } + } + + p := reflect.Indirect(pattern) + v := reflect.Indirect(val) + if !p.IsValid() || !v.IsValid() { + return !p.IsValid() && !v.IsValid() + } + + switch p.Kind() { + case reflect.Slice: + if p.Len() != v.Len() { + return false + } + for i := 0; i < p.Len(); i++ { + if !match(m, p.Index(i), v.Index(i)) { + return false + } + } + return true + + case reflect.Struct: + for i := 0; i < p.NumField(); i++ { + if !match(m, p.Field(i), v.Field(i)) { + return false + } + } + return true + + case reflect.Interface: + return match(m, p.Elem(), v.Elem()) + } + + // Handle token integers, etc. + return p.Interface() == v.Interface() +} + +// subst returns a copy of pattern with values from m substituted in place +// of wildcards and pos used as the position of tokens from the pattern. +// if m == nil, subst returns a copy of pattern and doesn't change the line +// number information. +func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { + if !pattern.IsValid() { + return reflect.Value{} + } + + // Wildcard gets replaced with map value. + if m != nil && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) { + if old, ok := m[name]; ok { + return subst(nil, old, reflect.Value{}) + } + } + } + + if pos.IsValid() && pattern.Type() == positionType { + // use new position only if old position was valid in the first place + if old := pattern.Interface().(token.Pos); !old.IsValid() { + return pattern + } + return pos + } + + // Otherwise copy. 
+ switch p := pattern; p.Kind() { + case reflect.Slice: + v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) + for i := 0; i < p.Len(); i++ { + v.Index(i).Set(subst(m, p.Index(i), pos)) + } + return v + + case reflect.Struct: + v := reflect.New(p.Type()).Elem() + for i := 0; i < p.NumField(); i++ { + v.Field(i).Set(subst(m, p.Field(i), pos)) + } + return v + + case reflect.Ptr: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos).Addr()) + } + return v + + case reflect.Interface: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos)) + } + return v + } + + return pattern +} diff --git a/vendor/github.com/klauspost/crc32/.gitignore b/vendor/github.com/klauspost/crc32/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/crc32/.travis.yml b/vendor/github.com/klauspost/crc32/.travis.yml new file mode 100644 index 0000000000..c50f5b7b0d --- /dev/null +++ b/vendor/github.com/klauspost/crc32/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip + +script: + - go test -v . + - go test -v -race . diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/crc32/LICENSE new file mode 100644 index 0000000000..4fd5963e39 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+Copyright (c) 2015 Klaus Post + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md new file mode 100644 index 0000000000..029625d360 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/README.md @@ -0,0 +1,87 @@ +# crc32 +CRC32 hash with x64 optimizations + +This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup. 
+ +[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32) + +# usage + +Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer. + +Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go. + +# changes +* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match. +* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable. + + +# performance + +For *Go 1.7* performance is equivalent to the standard library. So if you use this package for Go 1.7 you can switch back. + + +For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction: +``` +benchmark old ns/op new ns/op delta +BenchmarkCrc32KB 99955 10258 -89.74% + +benchmark old MB/s new MB/s speedup +BenchmarkCrc32KB 327.83 3194.20 9.74x +``` + +For other tables and "CLMUL" capable machines the performance is the same as the standard library. + +Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled. + +``` +Std: Standard Go 1.5 library +Crc: Indicates IEEE type CRC. +40B: Size of each slice encoded. +NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine). +Castagnoli: Castagnoli CRC type. 
+ +BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s +BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8) +BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8) + +BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s +BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8) +BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm) + +BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8) +BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8) +BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm) + +BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8) +BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8) +BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm) + +BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s +BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm) +BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8) +BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s +BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm) +BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8) +BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s +BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm) +BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8) +BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s +BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm) +BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8) +BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm) +``` + +The IEEE assembler optimizations has been submitted and will be part of the Go 1.6 standard library. 
+ +However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7. + +# license + +Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions. diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go new file mode 100644 index 0000000000..8aa91b17e9 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32.go @@ -0,0 +1,207 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32, +// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for +// information. +// +// Polynomials are represented in LSB-first form also known as reversed representation. +// +// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials +// for information. +package crc32 + +import ( + "hash" + "sync" +) + +// The size of a CRC-32 checksum in bytes. +const Size = 4 + +// Predefined polynomials. +const ( + // IEEE is by far and away the most common CRC-32 polynomial. + // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... + IEEE = 0xedb88320 + + // Castagnoli's polynomial, used in iSCSI. + // Has better error detection characteristics than IEEE. + // http://dx.doi.org/10.1109/26.231911 + Castagnoli = 0x82f63b78 + + // Koopman's polynomial. + // Also has better error detection characteristics than IEEE. + // http://dx.doi.org/10.1109/DSN.2002.1028931 + Koopman = 0xeb31d82e +) + +// Table is a 256-word table representing the polynomial for efficient processing. +type Table [256]uint32 + +// This file makes use of functions implemented in architecture-specific files. 
+// The interface that they implement is as follows: +// +// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE +// // algorithm is available. +// archAvailableIEEE() bool +// +// // archInitIEEE initializes the architecture-specific CRC3-IEEE algorithm. +// // It can only be called if archAvailableIEEE() returns true. +// archInitIEEE() +// +// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if +// // archInitIEEE() was previously called. +// archUpdateIEEE(crc uint32, p []byte) uint32 +// +// // archAvailableCastagnoli reports whether an architecture-specific +// // CRC32-C algorithm is available. +// archAvailableCastagnoli() bool +// +// // archInitCastagnoli initializes the architecture-specific CRC32-C +// // algorithm. It can only be called if archAvailableCastagnoli() returns +// // true. +// archInitCastagnoli() +// +// // archUpdateCastagnoli updates the given CRC32-C. It can only be called +// // if archInitCastagnoli() was previously called. +// archUpdateCastagnoli(crc uint32, p []byte) uint32 + +// castagnoliTable points to a lazily initialized Table for the Castagnoli +// polynomial. MakeTable will always return this value when asked to make a +// Castagnoli table so we can compare against it to find when the caller is +// using this polynomial. +var castagnoliTable *Table +var castagnoliTable8 *slicing8Table +var castagnoliArchImpl bool +var updateCastagnoli func(crc uint32, p []byte) uint32 +var castagnoliOnce sync.Once + +func castagnoliInit() { + castagnoliTable = simpleMakeTable(Castagnoli) + castagnoliArchImpl = archAvailableCastagnoli() + + if castagnoliArchImpl { + archInitCastagnoli() + updateCastagnoli = archUpdateCastagnoli + } else { + // Initialize the slicing-by-8 table. + castagnoliTable8 = slicingMakeTable(Castagnoli) + updateCastagnoli = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, castagnoliTable8, p) + } + } +} + +// IEEETable is the table for the IEEE polynomial. 
+var IEEETable = simpleMakeTable(IEEE) + +// ieeeTable8 is the slicing8Table for IEEE +var ieeeTable8 *slicing8Table +var ieeeArchImpl bool +var updateIEEE func(crc uint32, p []byte) uint32 +var ieeeOnce sync.Once + +func ieeeInit() { + ieeeArchImpl = archAvailableIEEE() + + if ieeeArchImpl { + archInitIEEE() + updateIEEE = archUpdateIEEE + } else { + // Initialize the slicing-by-8 table. + ieeeTable8 = slicingMakeTable(IEEE) + updateIEEE = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, ieeeTable8, p) + } + } +} + +// MakeTable returns a Table constructed from the specified polynomial. +// The contents of this Table must not be modified. +func MakeTable(poly uint32) *Table { + switch poly { + case IEEE: + ieeeOnce.Do(ieeeInit) + return IEEETable + case Castagnoli: + castagnoliOnce.Do(castagnoliInit) + return castagnoliTable + } + return simpleMakeTable(poly) +} + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint32 + tab *Table +} + +// New creates a new hash.Hash32 computing the CRC-32 checksum +// using the polynomial represented by the Table. +// Its Sum method will lay the value out in big-endian byte order. +func New(tab *Table) hash.Hash32 { + if tab == IEEETable { + ieeeOnce.Do(ieeeInit) + } + return &digest{0, tab} +} + +// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum +// using the IEEE polynomial. +// Its Sum method will lay the value out in big-endian byte order. +func NewIEEE() hash.Hash32 { return New(IEEETable) } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +// Update returns the result of adding the bytes in p to the crc. +func Update(crc uint32, tab *Table, p []byte) uint32 { + switch tab { + case castagnoliTable: + return updateCastagnoli(crc, p) + case IEEETable: + // Unfortunately, because IEEETable is exported, IEEE may be used without a + // call to MakeTable. 
We have to make sure it gets initialized in that case. + ieeeOnce.Do(ieeeInit) + return updateIEEE(crc, p) + default: + return simpleUpdate(crc, tab, p) + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + switch d.tab { + case castagnoliTable: + d.crc = updateCastagnoli(d.crc, p) + case IEEETable: + // We only create digest objects through New() which takes care of + // initialization in this case. + d.crc = updateIEEE(d.crc, p) + default: + d.crc = simpleUpdate(d.crc, d.tab, p) + } + return len(p), nil +} + +func (d *digest) Sum32() uint32 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum32() + return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// Checksum returns the CRC-32 checksum of data +// using the polynomial represented by the Table. +func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } + +// ChecksumIEEE returns the CRC-32 checksum of data +// using the IEEE polynomial. +func ChecksumIEEE(data []byte) uint32 { + ieeeOnce.Do(ieeeInit) + return updateIEEE(0, data) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go new file mode 100644 index 0000000000..af2a0b844b --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.go @@ -0,0 +1,230 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gccgo + +// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. + +package crc32 + +import "unsafe" + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// and IEEE CRC. + +// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use +// CPUID to test for SSE 4.1, 4.2 and CLMUL support. 
+func haveSSE41() bool +func haveSSE42() bool +func haveCLMUL() bool + +// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42Triple( + crcA, crcB, crcC uint32, + a, b, c []byte, + rounds uint32, +) (retA uint32, retB uint32, retC uint32) + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. +//go:noescape +func ieeeCLMUL(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() +var useFastIEEE = haveCLMUL() && haveSSE41() + +const castagnoliK1 = 168 +const castagnoliK2 = 1344 + +type sse42Table [4]Table + +var castagnoliSSE42TableK1 *sse42Table +var castagnoliSSE42TableK2 *sse42Table + +func archAvailableCastagnoli() bool { + return sse42 +} + +func archInitCastagnoli() { + if !sse42 { + panic("arch-specific Castagnoli not available") + } + castagnoliSSE42TableK1 = new(sse42Table) + castagnoliSSE42TableK2 = new(sse42Table) + // See description in updateCastagnoli. + // t[0][i] = CRC(i000, O) + // t[1][i] = CRC(0i00, O) + // t[2][i] = CRC(00i0, O) + // t[3][i] = CRC(000i, O) + // where O is a sequence of K zeros. + var tmp [castagnoliK2]byte + for b := 0; b < 4; b++ { + for i := 0; i < 256; i++ { + val := uint32(i) << uint32(b*8) + castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1]) + castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:]) + } + } +} + +// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the +// table given) with the given initial crc value. This corresponds to +// CRC(crc, O) in the description in updateCastagnoli. 
+func castagnoliShift(table *sse42Table, crc uint32) uint32 { + return table[3][crc>>24] ^ + table[2][(crc>>16)&0xFF] ^ + table[1][(crc>>8)&0xFF] ^ + table[0][crc&0xFF] +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !sse42 { + panic("not available") + } + + // This method is inspired from the algorithm in Intel's white paper: + // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction" + // The same strategy of splitting the buffer in three is used but the + // combining calculation is different; the complete derivation is explained + // below. + // + // -- The basic idea -- + // + // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a + // time. In recent Intel architectures the instruction takes 3 cycles; + // however the processor can pipeline up to three instructions if they + // don't depend on each other. + // + // Roughly this means that we can process three buffers in about the same + // time we can process one buffer. + // + // The idea is then to split the buffer in three, CRC the three pieces + // separately and then combine the results. + // + // Combining the results requires precomputed tables, so we must choose a + // fixed buffer length to optimize. The longer the length, the faster; but + // only buffers longer than this length will use the optimization. We choose + // two cutoffs and compute tables for both: + // - one around 512: 168*3=504 + // - one around 4KB: 1344*3=4032 + // + // -- The nitty gritty -- + // + // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with + // initial non-inverted CRC I). This function has the following properties: + // (a) CRC(I, AB) = CRC(CRC(I, A), B) + // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B) + // + // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of + // K bytes each, where K is a fixed constant. Let O be the sequence of K zero + // bytes. 
+ // + // CRC(I, ABC) = CRC(I, ABO xor C) + // = CRC(I, ABO) xor CRC(0, C) + // = CRC(CRC(I, AB), O) xor CRC(0, C) + // = CRC(CRC(I, AO xor B), O) xor CRC(0, C) + // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C) + // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C) + // + // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B), + // and CRC(0, C) efficiently. We just need to find a way to quickly compute + // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these + // values; since we can't have a 32-bit table, we break it up into four + // 8-bit tables: + // + // CRC(uvwx, O) = CRC(u000, O) xor + // CRC(0v00, O) xor + // CRC(00w0, O) xor + // CRC(000x, O) + // + // We can compute tables corresponding to the four terms for all 8-bit + // values. + + crc = ^crc + + // If a buffer is long enough to use the optimization, process the first few + // bytes to align the buffer to an 8 byte boundary (if necessary). + if len(p) >= castagnoliK1*3 { + delta := int(uintptr(unsafe.Pointer(&p[0])) & 7) + if delta != 0 { + delta = 8 - delta + crc = castagnoliSSE42(crc, p[:delta]) + p = p[delta:] + } + } + + // Process 3*K2 at a time. + for len(p) >= castagnoliK2*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK2:], p[castagnoliK2*2:], + castagnoliK2/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC + p = p[castagnoliK2*3:] + } + + // Process 3*K1 at a time. + for len(p) >= castagnoliK1*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). 
+ crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK1:], p[castagnoliK1*2:], + castagnoliK1/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC + p = p[castagnoliK1*3:] + } + + // Use the simple implementation for what's left. + crc = castagnoliSSE42(crc, p) + return ^crc +} + +func archAvailableIEEE() bool { + return useFastIEEE +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !useFastIEEE { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !useFastIEEE { + panic("not available") + } + + if len(p) >= 64 { + left := len(p) & 15 + do := len(p) - left + crc = ^ieeeCLMUL(^crc, p[:do]) + p = p[do:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s new file mode 100644 index 0000000000..e8a7941ce7 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.s @@ -0,0 +1,319 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +#define NOSPLIT 4 +#define RODATA 8 + +// castagnoliSSE42 updates the (non-inverted) crc with the given buffer. +// +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + // If there are fewer than 8 bytes to process, skip alignment. + CMPQ CX, $8 + JL less_than_8 + + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + // Process the first few bytes to 8-byte align the input. 
+ + // BX = 8 - BX. We need to process this many bytes to align. + SUBQ $1, BX + XORQ $7, BX + + BTQ $0, BX + JNC align_2 + + CRC32B (SI), AX + DECQ CX + INCQ SI + +align_2: + BTQ $1, BX + JNC align_4 + + // CRC32W (SI), AX + BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + + SUBQ $2, CX + ADDQ $2, SI + +align_4: + BTQ $2, BX + JNC aligned + + // CRC32L (SI), AX + BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + + SUBQ $4, CX + ADDQ $4, SI + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL less_than_8 + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + BTQ $2, CX + JNC less_than_4 + + // CRC32L (SI), AX + BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + ADDQ $4, SI + +less_than_4: + BTQ $1, CX + JNC less_than_2 + + // CRC32W (SI), AX + BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + ADDQ $2, SI + +less_than_2: + BTQ $0, CX + JNC done + + CRC32B (SI), AX + +done: + MOVL AX, ret+32(FP) + RET + +// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds) +// bytes from each buffer. 
+// +// func castagnoliSSE42Triple( +// crc1, crc2, crc3 uint32, +// a, b, c []byte, +// rounds uint32, +// ) (retA uint32, retB uint32, retC uint32) +TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0 + MOVL crcA+0(FP), AX + MOVL crcB+4(FP), CX + MOVL crcC+8(FP), DX + + MOVQ a+16(FP), R8 // data pointer + MOVQ b+40(FP), R9 // data pointer + MOVQ c+64(FP), R10 // data pointer + + MOVL rounds+88(FP), R11 + +loop: + CRC32Q (R8), AX + CRC32Q (R9), CX + CRC32Q (R10), DX + + CRC32Q 8(R8), AX + CRC32Q 8(R9), CX + CRC32Q 8(R10), DX + + CRC32Q 16(R8), AX + CRC32Q 16(R9), CX + CRC32Q 16(R10), DX + + ADDQ $24, R8 + ADDQ $24, R9 + ADDQ $24, R10 + + DECQ R11 + JNZ loop + + MOVL AX, retA+96(FP) + MOVL CX, retB+100(FP) + MOVL DX, retC+104(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveCLMUL() bool +TEXT ·haveCLMUL(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $1, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveSSE41() bool +TEXT ·haveSSE41(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $19, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// CRC32 polynomial data +// +// These constants are lifted from the +// Linux kernel, since they avoid the costly +// PSHUFB 16 byte reversal proposed in the +// original Intel paper. +DATA r2r1kp<>+0(SB)/8, $0x154442bd4 +DATA r2r1kp<>+8(SB)/8, $0x1c6e41596 +DATA r4r3kp<>+0(SB)/8, $0x1751997d0 +DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e +DATA rupolykp<>+0(SB)/8, $0x1db710641 +DATA rupolykp<>+8(SB)/8, $0x1f7011641 +DATA r5kp<>+0(SB)/8, $0x163cd6124 + +GLOBL r2r1kp<>(SB), RODATA, $16 +GLOBL r4r3kp<>(SB), RODATA, $16 +GLOBL rupolykp<>(SB), RODATA, $16 +GLOBL r5kp<>(SB), RODATA, $8 + +// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 64, and must be a multiple of 16. 
+ +// func ieeeCLMUL(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 + MOVL crc+0(FP), X0 // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + MOVOU (SI), X1 + MOVOU 16(SI), X2 + MOVOU 32(SI), X3 + MOVOU 48(SI), X4 + PXOR X0, X1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left + JB remain64 + + MOVOA r2r1kp<>+0(SB), X0 + +loopback64: + MOVOA X1, X5 + MOVOA X2, X6 + MOVOA X3, X7 + MOVOA X4, X8 + + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0, X0, X2 + PCLMULQDQ $0, X0, X3 + PCLMULQDQ $0, X0, X4 + + // Load next early + MOVOU (SI), X11 + MOVOU 16(SI), X12 + MOVOU 32(SI), X13 + MOVOU 48(SI), X14 + + PCLMULQDQ $0x11, X0, X5 + PCLMULQDQ $0x11, X0, X6 + PCLMULQDQ $0x11, X0, X7 + PCLMULQDQ $0x11, X0, X8 + + PXOR X5, X1 + PXOR X6, X2 + PXOR X7, X3 + PXOR X8, X4 + + PXOR X11, X1 + PXOR X12, X2 + PXOR X13, X3 + PXOR X14, X4 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? + JGE loopback64 + + // Fold result into a single register (X1) +remain64: + MOVOA r4r3kp<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5kp<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. 
+ PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupolykp<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + // PEXTRD $1, X1, AX (SSE 4.1) + BYTE $0x66; BYTE $0x0f; BYTE $0x3a + BYTE $0x16; BYTE $0xc8; BYTE $0x01 + MOVL AX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go new file mode 100644 index 0000000000..3222b06a5a --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go @@ -0,0 +1,43 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gccgo + +package crc32 + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// CRC. + +// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2 +// support. +func haveSSE42() bool + +// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() + +func archAvailableCastagnoli() bool { + return sse42 +} + +func archInitCastagnoli() { + if !sse42 { + panic("not available") + } + // No initialization necessary. 
+} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !sse42 { + panic("not available") + } + return castagnoliSSE42(crc, p) +} + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s new file mode 100644 index 0000000000..a578d685cc --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s @@ -0,0 +1,67 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +#define NOSPLIT 4 +#define RODATA 8 + +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVL p+4(FP), SI // data pointer + MOVL p_len+8(FP), CX // len(p) + + NOTL AX + + // If there's less than 8 bytes to process, we do it byte-by-byte. + CMPQ CX, $8 + JL cleanup + + // Process individual bytes until the input is 8-byte aligned. +startup: + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + CRC32B (SI), AX + DECQ CX + INCQ SI + JMP startup + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL cleanup + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +cleanup: + // We may have some bytes left over that we process one at a time. 
+ CMPQ CX, $0 + JE done + + CRC32B (SI), AX + INCQ SI + DECQ CX + JMP cleanup + +done: + NOTL AX + MOVL AX, ret+16(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go new file mode 100644 index 0000000000..abacbb663d --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_generic.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains CRC32 algorithms that are not specific to any architecture +// and don't use hardware acceleration. +// +// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table. +// +// The slicing-by-8 algorithm is a faster implementation that uses a bigger +// table (8*256*4 bytes). + +package crc32 + +// simpleMakeTable allocates and constructs a Table for the specified +// polynomial. The table is suitable for use with the simple algorithm +// (simpleUpdate). +func simpleMakeTable(poly uint32) *Table { + t := new(Table) + simplePopulateTable(poly, t) + return t +} + +// simplePopulateTable constructs a Table for the specified polynomial, suitable +// for use with simpleUpdate. +func simplePopulateTable(poly uint32, t *Table) { + for i := 0; i < 256; i++ { + crc := uint32(i) + for j := 0; j < 8; j++ { + if crc&1 == 1 { + crc = (crc >> 1) ^ poly + } else { + crc >>= 1 + } + } + t[i] = crc + } +} + +// simpleUpdate uses the simple algorithm to update the CRC, given a table that +// was previously computed using simpleMakeTable. +func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 { + crc = ^crc + for _, v := range p { + crc = tab[byte(crc)^v] ^ (crc >> 8) + } + return ^crc +} + +// Use slicing-by-8 when payload >= this value. 
+const slicing8Cutoff = 16 + +// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm. +type slicing8Table [8]Table + +// slicingMakeTable constructs a slicing8Table for the specified polynomial. The +// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate). +func slicingMakeTable(poly uint32) *slicing8Table { + t := new(slicing8Table) + simplePopulateTable(poly, &t[0]) + for i := 0; i < 256; i++ { + crc := t[0][i] + for j := 1; j < 8; j++ { + crc = t[0][crc&0xFF] ^ (crc >> 8) + t[j][i] = crc + } + } + return t +} + +// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a +// table that was previously computed using slicingMakeTable. +func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 { + if len(p) >= slicing8Cutoff { + crc = ^crc + for len(p) > 8 { + crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ + tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ + tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] + p = p[8:] + } + crc = ^crc + } + if len(p) == 0 { + return crc + } + return simpleUpdate(crc, &tab[0], p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go new file mode 100644 index 0000000000..cc960764bc --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_otherarch.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64,!amd64p32,!s390x + +package crc32 + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } + +func archAvailableCastagnoli() bool { return false } +func archInitCastagnoli() { panic("not available") } +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go new file mode 100644 index 0000000000..ce96f03281 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x + +package crc32 + +const ( + vxMinLen = 64 + vxAlignMask = 15 // align to 16 bytes +) + +// hasVectorFacility reports whether the machine has the z/Architecture +// vector facility installed and enabled. +func hasVectorFacility() bool + +var hasVX = hasVectorFacility() + +// vectorizedCastagnoli implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +//go:noescape +func vectorizedCastagnoli(crc uint32, p []byte) uint32 + +// vectorizedIEEE implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +//go:noescape +func vectorizedIEEE(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return hasVX +} + +var archCastagnoliTable8 *slicing8Table + +func archInitCastagnoli() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archCastagnoliTable8 = slicingMakeTable(Castagnoli) +} + +// archUpdateCastagnoli calculates the checksum of p using +// vectorizedCastagnoli. 
+func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedCastagnoli(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archCastagnoliTable8, p) +} + +func archAvailableIEEE() bool { + return hasVX +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedIEEE(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s new file mode 100644 index 0000000000..e980ca29d6 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.s @@ -0,0 +1,249 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x + +#include "textflag.h" + +// Vector register range containing CRC-32 constants + +#define CONST_PERM_LE2BE V9 +#define CONST_R2R1 V10 +#define CONST_R4R3 V11 +#define CONST_R5 V12 +#define CONST_RU_POLY V13 +#define CONST_CRC_POLY V14 + +// The CRC-32 constant block contains reduction constants to fold and +// process particular chunks of the input data stream in parallel. 
+// +// Note that the constant definitions below are extended in order to compute +// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. +// The rightmost doubleword can be 0 to prevent contribution to the result or +// can be multiplied by 1 to perform an XOR without the need for a separate +// VECTOR EXCLUSIVE OR instruction. +// +// The polynomials used are bit-reflected: +// +// IEEE: P'(x) = 0x0edb88320 +// Castagnoli: P'(x) = 0x082f63b78 + +// IEEE polynomial constants +DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crcleconskp+8(SB)/8, $0x0706050403020100 +DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2 +DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1 +DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4 +DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3 +DATA ·crcleconskp+48(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5 +DATA ·crcleconskp+64(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u' +DATA ·crcleconskp+80(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1 + +GLOBL ·crcleconskp(SB), RODATA, $144 + +// Castagonli Polynomial constants +DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crccleconskp+8(SB)/8, $0x0706050403020100 +DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2 +DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1 +DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4 +DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3 +DATA ·crccleconskp+48(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5 +DATA ·crccleconskp+64(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u' +DATA ·crccleconskp+80(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1 + +GLOBL ·crccleconskp(SB), RODATA, $144 + +// func hasVectorFacility() bool +TEXT 
·hasVectorFacility(SB), NOSPLIT, $24-1 + MOVD $x-24(SP), R1 + XC $24, 0(R1), 0(R1) // clear the storage + MOVD $2, R0 // R0 is the number of double words stored -1 + WORD $0xB2B01000 // STFLE 0(R1) + XOR R0, R0 // reset the value of R0 + MOVBZ z-8(SP), R1 + AND $0x40, R1 + BEQ novector + +vectorinstalled: + // check if the vector instruction has been enabled + VLEIB $0, $0xF, V16 + VLGVB $0, V16, R1 + CMPBNE R1, $0xF, novector + MOVB $1, ret+0(FP) // have vx + RET + +novector: + MOVB $0, ret+0(FP) // no vx + RET + +// The CRC-32 function(s) use these calling conventions: +// +// Parameters: +// +// R2: Initial CRC value, typically ~0; and final CRC (return) value. +// R3: Input buffer pointer, performance might be improved if the +// buffer is on a doubleword boundary. +// R4: Length of the buffer, must be 64 bytes or greater. +// +// Register usage: +// +// R5: CRC-32 constant pool base pointer. +// V0: Initial CRC value and intermediate constants and results. +// V1..V4: Data for CRC computation. +// V5..V8: Next data chunks that are fetched from the input buffer. +// +// V9..V14: CRC-32 constants. 
+ +// func vectorizedIEEE(crc uint32, p []byte) uint32 +TEXT ·vectorizedIEEE(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + MOVD $·crcleconskp(SB), R5 + BR vectorizedBody<>(SB) + +// func vectorizedCastagnoli(crc uint32, p []byte) uint32 +TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + // R5: crc-32 constant pool base pointer, constant is used to reduce crc + MOVD $·crccleconskp(SB), R5 + BR vectorizedBody<>(SB) + +TEXT vectorizedBody<>(SB), NOSPLIT, $0 + XOR $0xffffffff, R2 // NOTW R2 + VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY + + // Load the initial CRC value into the rightmost word of V0 + VZERO V0 + VLVGF $3, R2, V0 + + // Crash if the input size is less than 64-bytes. + CMP R4, $64 + BLT crash + + // Load a 64-byte data chunk and XOR with CRC + VLM 0(R3), V1, V4 // 64-bytes into V1..V4 + + // Reflect the data if the CRC operation is in the bit-reflected domain + VPERM V1, V1, CONST_PERM_LE2BE, V1 + VPERM V2, V2, CONST_PERM_LE2BE, V2 + VPERM V3, V3, CONST_PERM_LE2BE, V3 + VPERM V4, V4, CONST_PERM_LE2BE, V4 + + VX V0, V1, V1 // V1 ^= CRC + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 + + // Check remaining buffer size and jump to proper folding method + CMP R4, $64 + BLT less_than_64bytes + +fold_64bytes_loop: + // Load the next 64-byte data chunk into V5 to V8 + VLM 0(R3), V5, V8 + VPERM V5, V5, CONST_PERM_LE2BE, V5 + VPERM V6, V6, CONST_PERM_LE2BE, V6 + VPERM V7, V7, CONST_PERM_LE2BE, V7 + VPERM V8, V8, CONST_PERM_LE2BE, V8 + + // Perform a GF(2) multiplication of the doublewords in V1 with + // the reduction constants in V0. The intermediate result is + // then folded (accumulated) with the next data chunk in V5 and + // stored in V1. Repeat this step for the register contents + // in V2, V3, and V4 respectively. 
+ + VGFMAG CONST_R2R1, V1, V5, V1 + VGFMAG CONST_R2R1, V2, V6, V2 + VGFMAG CONST_R2R1, V3, V7, V3 + VGFMAG CONST_R2R1, V4, V8, V4 + + // Adjust buffer pointer and length for next loop + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 // LEN = LEN - 64 + + CMP R4, $64 + BGE fold_64bytes_loop + +less_than_64bytes: + // Fold V1 to V4 into a single 128-bit value in V1 + VGFMAG CONST_R4R3, V1, V2, V1 + VGFMAG CONST_R4R3, V1, V3, V1 + VGFMAG CONST_R4R3, V1, V4, V1 + + // Check whether to continue with 64-bit folding + CMP R4, $16 + BLT final_fold + +fold_16bytes_loop: + VL 0(R3), V2 // Load next data chunk + VPERM V2, V2, CONST_PERM_LE2BE, V2 + + VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk + + // Adjust buffer pointer and size for folding next data chunk + ADD $16, R3 + ADD $-16, R4 + + // Process remaining data chunks + CMP R4, $16 + BGE fold_16bytes_loop + +final_fold: + VLEIB $7, $0x40, V9 + VSRLB V9, CONST_R4R3, V0 + VLEIG $0, $1, V0 + + VGFMG V0, V1, V1 + + VLEIB $7, $0x20, V9 // Shift by words + VSRLB V9, V1, V2 // Store remaining bits in V2 + VUPLLF V1, V1 // Split rightmost doubleword + VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2 + + // The input values to the Barret reduction are the degree-63 polynomial + // in V1 (R(x)), degree-32 generator polynomial, and the reduction + // constant u. The Barret reduction result is the CRC value of R(x) mod + // P(x). + // + // The Barret reduction algorithm is defined as: + // + // 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u + // 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) + // 3. C(x) = R(x) XOR T2(x) mod x^32 + // + // Note: To compensate the division by x^32, use the vector unpack + // instruction to move the leftmost word into the leftmost doubleword + // of the vector register. The rightmost doubleword is multiplied + // with zero to not contribute to the intermedate results. 
+ + // T1(x) = floor( R(x) / x^32 ) GF2MUL u + VUPLLF V1, V2 + VGFMG CONST_RU_POLY, V2, V2 + + // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in + // V2 and XOR the intermediate result, T2(x), with the value in V1. + // The final result is in the rightmost word of V2. + + VUPLLF V2, V2 + VGFMAG CONST_CRC_POLY, V2, V1, V2 + +done: + VLGVF $2, V2, R2 + XOR $0xffffffff, R2 // NOTW R2 + MOVWZ R2, ret + 32(FP) + RET + +crash: + MOVD $0, (R0) // input size is less than 64-bytes diff --git a/vendor/github.com/klauspost/pgzip/.gitignore b/vendor/github.com/klauspost/pgzip/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/pgzip/.travis.yml b/vendor/github.com/klauspost/pgzip/.travis.yml new file mode 100644 index 0000000000..18dacf4547 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/.travis.yml @@ -0,0 +1,23 @@ +language: go + +sudo: false + +os: + - linux + - osx + +go: + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - master + +script: + - go test -v -cpu=1,2,4 . + - go test -v -cpu=2 -race -short . + +matrix: + allow_failures: + - go: 'master' + fast_finish: true diff --git a/vendor/github.com/klauspost/pgzip/GO_LICENSE b/vendor/github.com/klauspost/pgzip/GO_LICENSE new file mode 100644 index 0000000000..7448756763 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/GO_LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE new file mode 100644 index 0000000000..2bdc0d7517 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/pgzip/README.md b/vendor/github.com/klauspost/pgzip/README.md new file mode 100644 index 0000000000..97f5d16566 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/README.md @@ -0,0 +1,136 @@ +pgzip +===== + +Go parallel gzip compression/decompression. This is a fully gzip compatible drop in replacement for "compress/gzip". + +This will split compression into blocks that are compressed in parallel. +This can be useful for compressing big amounts of data. The output is a standard gzip file. + +The gzip decompression is modified so it decompresses ahead of the current reader. 
+This means that reads will be non-blocking if the decompressor can keep ahead of your code reading from it. +CRC calculation also takes place in a separate goroutine. + +You should only use this if you are (de)compressing big amounts of data, +say **more than 1MB** at the time, otherwise you will not see any benefit, +and it will likely be faster to use the internal gzip library +or [this package](https://github.com/klauspost/compress). + +It is important to note that this library creates and reads *standard gzip files*. +You do not have to match the compressor/decompressor to get the described speedups, +and the gzip files are fully compatible with other gzip readers/writers. + +A golang variant of this is [bgzf](https://godoc.org/github.com/biogo/hts/bgzf), +which has the same feature, as well as seeking in the resulting file. +The only drawback is a slightly bigger overhead compared to this and pure gzip. +See a comparison below. + +[![GoDoc][1]][2] [![Build Status][3]][4] + +[1]: https://godoc.org/github.com/klauspost/pgzip?status.svg +[2]: https://godoc.org/github.com/klauspost/pgzip +[3]: https://travis-ci.org/klauspost/pgzip.svg +[4]: https://travis-ci.org/klauspost/pgzip + +Installation +==== +```go get github.com/klauspost/pgzip/...``` + +You might need to get/update the dependencies: + +``` +go get -u github.com/klauspost/compress +go get -u github.com/klauspost/crc32 +``` + +Usage +==== +[Godoc Doumentation](https://godoc.org/github.com/klauspost/pgzip) + +To use as a replacement for gzip, exchange + +```import "compress/gzip"``` +with +```import gzip "github.com/klauspost/pgzip"```. + +# Changes + +* Oct 6, 2016: Fixed an issue if the destination writer returned an error. +* Oct 6, 2016: Better buffer reuse, should now generate less garbage. +* Oct 6, 2016: Output does not change based on write sizes. +* Dec 8, 2015: Decoder now supports the io.WriterTo interface, giving a speedup and less GC pressure. 
+* Oct 9, 2015: Reduced allocations by ~35 by using sync.Pool. ~15% overall speedup. + +Changes in [github.com/klauspost/compress](https://github.com/klauspost/compress#changelog) are also carried over, so see that for more changes. + +## Compression +The simplest way to use this is to simply do the same as you would when using [compress/gzip](http://golang.org/pkg/compress/gzip). + +To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel. Default values for this is SetConcurrency(250000, 16), meaning blocks are split at 250000 bytes and up to 16 blocks can be processing at once before the writer blocks. + + +Example: +``` +var b bytes.Buffer +w := gzip.NewWriter(&b) +w.SetConcurrency(100000, 10) +w.Write([]byte("hello, world\n")) +w.Close() +``` + +To get any performance gains, you should at least be compressing more than 1 megabyte of data at the time. + +You should at least have a block size of 100k and at least a number of blocks that match the number of cores your would like to utilize, but about twice the number of blocks would be the best. + +Another side effect of this is, that it is likely to speed up your other code, since writes to the compressor only blocks if the compressor is already compressing the number of blocks you have specified. This also means you don't have worry about buffering input to the compressor. + +## Decompression + +Decompression works similar to compression. That means that you simply call pgzip the same way as you would call [compress/gzip](http://golang.org/pkg/compress/gzip). + +The only difference is that if you want to specify your own readahead, you have to use `pgzip.NewReaderN(r io.Reader, blockSize, blocks int)` to get a reader with your custom blocksizes. 
The `blockSize` is the size of each block decoded, and `blocks` is the maximum number of blocks that is decoded ahead. + +See [Example on playground](http://play.golang.org/p/uHv1B5NbDh) + +Performance +==== +## Compression + +See my blog post in [Benchmarks of Golang Gzip](https://blog.klauspost.com/go-gzipdeflate-benchmarks/). + +Compression cost is usually about 0.2% with default settings with a block size of 250k. + +Example with GOMAXPROC set to 8 (quad core with 8 hyperthreads) + +Content is [Matt Mahoneys 10GB corpus](http://mattmahoney.net/dc/10gb.html). Compression level 6. + +Compressor | MB/sec | speedup | size | size overhead (lower=better) +------------|----------|---------|------|--------- +[gzip](http://golang.org/pkg/compress/gzip) (golang) | 7.21MB/s | 1.0x | 4786608902 | 0% +[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 10.98MB/s | 1.52x | 4781331645 | -0.11% +[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 50.76MB/s|7.04x | 4784121440 | -0.052% +[bgzf](https://godoc.org/github.com/biogo/hts/bgzf) (biogo) | 38.65MB/s | 5.36x | 4924899484 | 2.889% +[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 32.00MB/s | 4.44x | 4791226567 | 0.096% + +pgzip also contains a [linear time compression](https://github.com/klauspost/compress#linear-time-compression) mode, that will allow compression at ~150MB per core per second, independent of the content. + +See the [complete sheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) for different content types and compression settings. + +## Decompression + +The decompression speedup is there because it allows you to do other work while the decompression is taking place. 
+ +In the example above, the numbers are as follows on a 4 CPU machine: + +Decompressor | Time | Speedup +-------------|------|-------- +[gzip](http://golang.org/pkg/compress/gzip) (golang) | 1m28.85s | 0% +[pgzip](https://github.com/klauspost/pgzip) (golang) | 43.48s | 104% + +But wait, since gzip decompression is inherently singlethreaded (aside from CRC calculation) how can it be more than 100% faster? Because pgzip due to its design also acts as a buffer. When using ubuffered gzip, you are also waiting for io when you are decompressing. If the gzip decoder can keep up, it will always have data ready for your reader, and you will not be waiting for input to the gzip decompressor to complete. + +This is pretty much an optimal situation for pgzip, but it reflects most common usecases for CPU intensive gzip usage. + +I haven't included [bgzf](https://godoc.org/github.com/biogo/hts/bgzf) in this comparision, since it only can decompress files created by a compatible encoder, and therefore cannot be considered a generic gzip decompressor. But if you are able to compress your files with a bgzf compatible program, you can expect it to scale beyond 100%. + +#License +This contains large portions of code from the go repository - see GO_LICENSE for more information. The changes are released under MIT License. See LICENSE for more information. diff --git a/vendor/github.com/klauspost/pgzip/circle.yml b/vendor/github.com/klauspost/pgzip/circle.yml new file mode 100644 index 0000000000..67b2b1628f --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/circle.yml @@ -0,0 +1,7 @@ +test: + pre: + - go vet ./... + + override: + - go test -v -cpu=1,2,4 . + - go test -v -cpu=2 -race -short . 
\ No newline at end of file diff --git a/vendor/github.com/klauspost/pgzip/gunzip.go b/vendor/github.com/klauspost/pgzip/gunzip.go new file mode 100644 index 0000000000..f0e8fcb3d8 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/gunzip.go @@ -0,0 +1,573 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pgzip implements reading and writing of gzip format compressed files, +// as specified in RFC 1952. +// +// This is a drop in replacement for "compress/gzip". +// This will split compression into blocks that are compressed in parallel. +// This can be useful for compressing big amounts of data. +// The gzip decompression has not been modified, but remains in the package, +// so you can use it as a complete replacement for "compress/gzip". +// +// See more at https://github.com/klauspost/pgzip +package pgzip + +import ( + "bufio" + "errors" + "hash" + "io" + "sync" + "time" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/crc32" +) + +const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + flagText = 1 << 0 + flagHdrCrc = 1 << 1 + flagExtra = 1 << 2 + flagName = 1 << 3 + flagComment = 1 << 4 +) + +func makeReader(r io.Reader) flate.Reader { + if rr, ok := r.(flate.Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +var ( + // ErrChecksum is returned when reading GZIP data that has an invalid checksum. + ErrChecksum = errors.New("gzip: invalid checksum") + // ErrHeader is returned when reading GZIP data that has an invalid header. + ErrHeader = errors.New("gzip: invalid header") +) + +// The gzip file stores a header giving metadata about the compressed file. +// That header is exposed as the fields of the Writer and Reader structs. 
+type Header struct { + Comment string // comment + Extra []byte // "extra data" + ModTime time.Time // modification time + Name string // file name + OS byte // operating system type +} + +// A Reader is an io.Reader that can be read to retrieve +// uncompressed data from a gzip-format compressed file. +// +// In general, a gzip file can be a concatenation of gzip files, +// each with its own header. Reads from the Reader +// return the concatenation of the uncompressed data of each. +// Only the first header is recorded in the Reader fields. +// +// Gzip files store a length and checksum of the uncompressed data. +// The Reader will return a ErrChecksum when Read +// reaches the end of the uncompressed data if it does not +// have the expected length or checksum. Clients should treat data +// returned by Read as tentative until they receive the io.EOF +// marking the end of the data. +type Reader struct { + Header + r flate.Reader + decompressor io.ReadCloser + digest hash.Hash32 + size uint32 + flg byte + buf [512]byte + err error + closeErr chan error + multistream bool + + readAhead chan read + roff int // read offset + current []byte + closeReader chan struct{} + lastBlock bool + blockSize int + blocks int + + activeRA bool // Indication if readahead is active + mu sync.Mutex // Lock for above + + blockPool chan []byte +} + +type read struct { + b []byte + err error +} + +// NewReader creates a new Reader reading the given reader. +// The implementation buffers input and may read more data than necessary from r. +// It is the caller's responsibility to call Close on the Reader when done. 
+func NewReader(r io.Reader) (*Reader, error) { + z := new(Reader) + z.blocks = defaultBlocks + z.blockSize = defaultBlockSize + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.multistream = true + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + if err := z.readHeader(true); err != nil { + return nil, err + } + return z, nil +} + +// NewReaderN creates a new Reader reading the given reader. +// The implementation buffers input and may read more data than necessary from r. +// It is the caller's responsibility to call Close on the Reader when done. +// +// With this you can control the approximate size of your blocks, +// as well as how many blocks you want to have prefetched. +// +// Default values for this is blockSize = 250000, blocks = 16, +// meaning up to 16 blocks of maximum 250000 bytes will be +// prefetched. +func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) { + z := new(Reader) + z.blocks = blocks + z.blockSize = blockSize + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.multistream = true + + // Account for too small values + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + if err := z.readHeader(true); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. 
+func (z *Reader) Reset(r io.Reader) error { + z.killReadAhead() + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.size = 0 + z.err = nil + z.multistream = true + + // Account for uninitialized values + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + + if z.blockPool == nil { + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + } + + return z.readHeader(true) +} + +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). 
+func get4(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func (z *Reader) readString() (string, error) { + var err error + needconv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needconv = true + } + if z.buf[i] == 0 { + // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). + if needconv { + s := make([]rune, 0, i) + for _, v := range z.buf[0:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[0:i]), nil + } + } +} + +func (z *Reader) read2() (uint32, error) { + _, err := io.ReadFull(z.r, z.buf[0:2]) + if err != nil { + return 0, err + } + return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil +} + +func (z *Reader) readHeader(save bool) error { + z.killReadAhead() + + _, err := io.ReadFull(z.r, z.buf[0:10]) + if err != nil { + return err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return ErrHeader + } + z.flg = z.buf[3] + if save { + z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0) + // z.buf[8] is xfl, ignored + z.OS = z.buf[9] + } + z.digest.Reset() + z.digest.Write(z.buf[0:10]) + + if z.flg&flagExtra != 0 { + n, err := z.read2() + if err != nil { + return err + } + data := make([]byte, n) + if _, err = io.ReadFull(z.r, data); err != nil { + return err + } + if save { + z.Extra = data + } + } + + var s string + if z.flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return err + } + if save { + z.Name = s + } + } + + if z.flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return err + } + if save { + z.Comment = s + } + } + + if z.flg&flagHdrCrc != 0 { + n, err := z.read2() + if err != nil { + return err + } + sum := z.digest.Sum32() & 0xFFFF + if n != sum { + return ErrHeader + } + } + + z.digest.Reset() + z.decompressor = flate.NewReader(z.r) + 
z.doReadAhead() + return nil +} + +func (z *Reader) killReadAhead() error { + z.mu.Lock() + defer z.mu.Unlock() + if z.activeRA { + if z.closeReader != nil { + close(z.closeReader) + } + + // Wait for decompressor to be closed and return error, if any. + e, ok := <-z.closeErr + z.activeRA = false + if !ok { + // Channel is closed, so if there was any error it has already been returned. + return nil + } + return e + } + return nil +} + +// Starts readahead. +// Will return on error (including io.EOF) +// or when z.closeReader is closed. +func (z *Reader) doReadAhead() { + z.mu.Lock() + defer z.mu.Unlock() + z.activeRA = true + + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + ra := make(chan read, z.blocks) + z.readAhead = ra + closeReader := make(chan struct{}, 0) + z.closeReader = closeReader + z.lastBlock = false + closeErr := make(chan error, 1) + z.closeErr = closeErr + z.size = 0 + z.roff = 0 + z.current = nil + decomp := z.decompressor + + go func() { + defer func() { + closeErr <- decomp.Close() + close(closeErr) + close(ra) + }() + + // We hold a local reference to digest, since + // it way be changed by reset. + digest := z.digest + var wg sync.WaitGroup + for { + var buf []byte + select { + case buf = <-z.blockPool: + case <-closeReader: + return + } + buf = buf[0:z.blockSize] + // Try to fill the buffer + n, err := io.ReadFull(decomp, buf) + if err == io.ErrUnexpectedEOF { + if n > 0 { + err = nil + } else { + // If we got zero bytes, we need to establish if + // we reached end of stream or truncated stream. 
+ _, err = decomp.Read([]byte{}) + if err == io.EOF { + err = nil + } + } + } + if n < len(buf) { + buf = buf[0:n] + } + wg.Wait() + wg.Add(1) + go func() { + digest.Write(buf) + wg.Done() + }() + z.size += uint32(n) + + // If we return any error, out digest must be ready + if err != nil { + wg.Wait() + } + select { + case z.readAhead <- read{b: buf, err: err}: + case <-closeReader: + // Sent on close, we don't care about the next results + return + } + if err != nil { + return + } + } + }() +} + +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + + for { + if len(z.current) == 0 && !z.lastBlock { + read := <-z.readAhead + + if read.err != nil { + // If not nil, the reader will have exited + z.closeReader = nil + + if read.err != io.EOF { + z.err = read.err + return + } + if read.err == io.EOF { + z.lastBlock = true + err = nil + } + } + z.current = read.b + z.roff = 0 + } + avail := z.current[z.roff:] + if len(p) >= len(avail) { + // If len(p) >= len(current), return all content of current + n = copy(p, avail) + z.blockPool <- z.current + z.current = nil + if z.lastBlock { + err = io.EOF + break + } + } else { + // We copy as much as there is space for + n = copy(p, avail) + z.roff += n + } + return + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + z.err = err + return 0, err + } + crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) + sum := z.digest.Sum32() + if sum != crc32 || isize != z.size { + z.err = ErrChecksum + return 0, z.err + } + + // File is ok; should we attempt reading one more? + if !z.multistream { + return 0, io.EOF + } + + // Is there another? + if err = z.readHeader(false); err != nil { + z.err = err + return + } + + // Yes. Reset and read from it. 
+ return z.Read(p) +} + +func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { + total := int64(0) + for { + if z.err != nil { + return total, z.err + } + // We write both to output and digest. + for { + // Read from input + read := <-z.readAhead + if read.err != nil { + // If not nil, the reader will have exited + z.closeReader = nil + + if read.err != io.EOF { + z.err = read.err + return total, z.err + } + if read.err == io.EOF { + z.lastBlock = true + err = nil + } + } + // Write what we got + n, err := w.Write(read.b) + if n != len(read.b) { + return total, io.ErrShortWrite + } + total += int64(n) + if err != nil { + return total, err + } + // Put block back + z.blockPool <- read.b + if z.lastBlock { + break + } + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + z.err = err + return total, err + } + crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) + sum := z.digest.Sum32() + if sum != crc32 || isize != z.size { + z.err = ErrChecksum + return total, z.err + } + // File is ok; should we attempt reading one more? + if !z.multistream { + return total, nil + } + + // Is there another? + err = z.readHeader(false) + if err == io.EOF { + return total, nil + } + if err != nil { + z.err = err + return total, err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +func (z *Reader) Close() error { + return z.killReadAhead() +} diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go new file mode 100644 index 0000000000..9b97a0f5a7 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/gzip.go @@ -0,0 +1,501 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pgzip + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + "sync" + "time" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/crc32" +) + +const ( + defaultBlockSize = 256 << 10 + tailSize = 16384 + defaultBlocks = 16 +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. +type Writer struct { + Header + w io.Writer + level int + wroteHeader bool + blockSize int + blocks int + currentBuffer []byte + prevTail []byte + digest hash.Hash32 + size int + closed bool + buf [10]byte + errMu sync.RWMutex + err error + pushedErr chan struct{} + results chan result + dictFlatePool sync.Pool + dstPool sync.Pool + wg sync.WaitGroup +} + +type result struct { + result chan []byte + notifyWritten chan struct{} +} + +// Use SetConcurrency to finetune the concurrency level if needed. +// +// With this you can control the approximate size of your blocks, +// as well as how many you want to be processing in parallel. +// +// Default values for this is SetConcurrency(250000, 16), +// meaning blocks are split at 250000 bytes and up to 16 blocks +// can be processing at once before the writer blocks. 
+func (z *Writer) SetConcurrency(blockSize, blocks int) error { + if blockSize <= tailSize { + return fmt.Errorf("gzip: block size cannot be less than or equal to %d", tailSize) + } + if blocks <= 0 { + return errors.New("gzip: blocks cannot be zero or less") + } + if blockSize == z.blockSize && blocks == z.blocks { + return nil + } + z.blockSize = blockSize + z.results = make(chan result, blocks) + z.blocks = blocks + z.dstPool = sync.Pool{New: func() interface{} { return make([]byte, 0, blockSize+(blockSize)>>4) }} + return nil +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write or Close. The Comment and Name header fields are +// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO +// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an +// error on Write. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, or any +// integer value between BestSpeed and BestCompression inclusive. The error +// returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < ConstantCompression || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.SetConcurrency(defaultBlockSize, defaultBlocks) + z.init(w, level) + return z, nil +} + +// This function must be used by goroutines to set an +// error condition, since z.err access is restricted +// to the callers goruotine. 
+func (z *Writer) pushError(err error) { + z.errMu.Lock() + if z.err != nil { + z.errMu.Unlock() + return + } + z.err = err + close(z.pushedErr) + z.errMu.Unlock() +} + +func (z *Writer) init(w io.Writer, level int) { + z.wg.Wait() + digest := z.digest + if digest != nil { + digest.Reset() + } else { + digest = crc32.NewIEEE() + } + z.Header = Header{OS: 255} + z.w = w + z.level = level + z.digest = digest + z.pushedErr = make(chan struct{}, 0) + z.results = make(chan result, z.blocks) + z.err = nil + z.closed = false + z.Comment = "" + z.Extra = nil + z.ModTime = time.Time{} + z.wroteHeader = false + z.currentBuffer = nil + z.buf = [10]byte{} + z.prevTail = nil + z.size = 0 + if z.dictFlatePool.New == nil { + z.dictFlatePool.New = func() interface{} { + f, _ := flate.NewWriterDict(w, level, nil) + return f + } + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + if z.results != nil && !z.closed { + close(z.results) + } + z.SetConcurrency(defaultBlockSize, defaultBlocks) + z.init(w, z.level) +} + +// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). +func put2(p []byte, v uint16) { + p[0] = uint8(v >> 0) + p[1] = uint8(v >> 8) +} + +func put4(p []byte, v uint32) { + p[0] = uint8(v >> 0) + p[1] = uint8(v >> 8) + p[2] = uint8(v >> 16) + p[3] = uint8(v >> 24) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + put2(z.buf[0:2], uint16(len(b))) + _, err := z.w.Write(z.buf[0:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. 
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. + needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[0:1]) + return err +} + +// compressCurrent will compress the data currently buffered +// This should only be called from the main writer/flush/closer +func (z *Writer) compressCurrent(flush bool) { + r := result{} + r.result = make(chan []byte, 1) + r.notifyWritten = make(chan struct{}, 0) + select { + case z.results <- r: + case <-z.pushedErr: + return + } + + // If block given is more than twice the block size, split it. + c := z.currentBuffer + if len(c) > z.blockSize*2 { + c = c[:z.blockSize] + z.wg.Add(1) + go z.compressBlock(c, z.prevTail, r, false) + z.prevTail = c[len(c)-tailSize:] + z.currentBuffer = z.currentBuffer[z.blockSize:] + z.compressCurrent(flush) + // Last one flushes if needed + return + } + + z.wg.Add(1) + go z.compressBlock(c, z.prevTail, r, z.closed) + if len(c) > tailSize { + z.prevTail = c[len(c)-tailSize:] + } else { + z.prevTail = nil + } + z.currentBuffer = z.dstPool.Get().([]byte) + z.currentBuffer = z.currentBuffer[:0] + + // Wait if flushing + if flush { + <-r.notifyWritten + } +} + +// Returns an error if it has been set. +// Cannot be used by functions that are from internal goroutines. +func (z *Writer) checkError() error { + z.errMu.RLock() + err := z.err + z.errMu.RUnlock() + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. 
The +// compressed bytes are not necessarily flushed to output until +// the Writer is closed or Flush() is called. +// +// The function will return quickly, if there are unused buffers. +// The sent slice (p) is copied, and the caller is free to re-use the buffer +// when the function returns. +// +// Errors that occur during compression will be reported later, and a nil error +// does not signify that the compression succeeded (since it is most likely still running) +// That means that the call that returns an error may not be the call that caused it. +// Only Flush and Close functions are guaranteed to return any errors up to that point. +func (z *Writer) Write(p []byte) (int, error) { + if err := z.checkError(); err != nil { + return 0, err + } + // Write the GZIP header lazily. + if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + put4(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + var n int + var err error + n, err = z.w.Write(z.buf[0:10]) + if err != nil { + z.pushError(err) + return n, err + } + if z.Extra != nil { + err = z.writeBytes(z.Extra) + if err != nil { + z.pushError(err) + return n, err + } + } + if z.Name != "" { + err = z.writeString(z.Name) + if err != nil { + z.pushError(err) + return n, err + } + } + if z.Comment != "" { + err = z.writeString(z.Comment) + if err != nil { + z.pushError(err) + return n, err + } + } + // Start receiving data from compressors + go func() { + listen := z.results + for { + r, ok := <-listen + // If closed, we are finished. 
+ if !ok { + return + } + buf := <-r.result + n, err := z.w.Write(buf) + if err != nil { + z.pushError(err) + close(r.notifyWritten) + return + } + if n != len(buf) { + z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf))) + close(r.notifyWritten) + return + } + z.dstPool.Put(buf) + close(r.notifyWritten) + } + }() + z.currentBuffer = make([]byte, 0, z.blockSize) + } + q := p + for len(q) > 0 { + length := len(q) + if length+len(z.currentBuffer) > z.blockSize { + length = z.blockSize - len(z.currentBuffer) + } + z.digest.Write(q[:length]) + z.currentBuffer = append(z.currentBuffer, q[:length]...) + if len(z.currentBuffer) >= z.blockSize { + z.compressCurrent(false) + if err := z.checkError(); err != nil { + return len(p) - len(q) - length, err + } + } + z.size += length + q = q[length:] + } + return len(p), z.checkError() +} + +// Step 1: compresses buffer to buffer +// Step 2: send writer to channel +// Step 3: Close result channel to indicate we are done +func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) { + defer func() { + close(r.result) + z.wg.Done() + }() + buf := z.dstPool.Get().([]byte) + dest := bytes.NewBuffer(buf[:0]) + + compressor := z.dictFlatePool.Get().(*flate.Writer) + compressor.ResetDict(dest, prevTail) + compressor.Write(p) + + err := compressor.Flush() + if err != nil { + z.pushError(err) + return + } + if closed { + err = compressor.Close() + if err != nil { + z.pushError(err) + return + } + } + z.dictFlatePool.Put(compressor) + // Read back buffer + buf = dest.Bytes() + r.result <- buf +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. 
+// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if err := z.checkError(); err != nil { + return err + } + if z.closed { + return nil + } + if !z.wroteHeader { + _, err := z.Write(nil) + if err != nil { + return err + } + } + // We send current block to compression + z.compressCurrent(true) + + return z.checkError() +} + +// UncompressedSize will return the number of bytes written. +// pgzip only, not a function in the official gzip package. +func (z *Writer) UncompressedSize() int { + return z.size +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if err := z.checkError(); err != nil { + return err + } + if z.closed { + return nil + } + + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if err := z.checkError(); err != nil { + return err + } + } + z.compressCurrent(true) + if err := z.checkError(); err != nil { + return err + } + close(z.results) + put4(z.buf[0:4], z.digest.Sum32()) + put4(z.buf[4:8], uint32(z.size)) + _, err := z.w.Write(z.buf[0:8]) + if err != nil { + z.pushError(err) + return err + } + return nil +} diff --git a/vendor/github.com/openSUSE/umoci/.gitignore b/vendor/github.com/openSUSE/umoci/.gitignore new file mode 100644 index 0000000000..93b097e44e --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/.gitignore @@ -0,0 +1,4 @@ +/umoci +/cache +/umoci.cov* +/release diff --git a/vendor/github.com/openSUSE/umoci/.gitmodules b/vendor/github.com/openSUSE/umoci/.gitmodules new file mode 100644 index 0000000000..7299d93942 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/.gitmodules @@ -0,0 +1,3 @@ +[submodule ".site/themes/hugo-theme-learn"] + path = .site/themes/hugo-theme-learn + url = https://github.com/matcornic/hugo-theme-learn.git diff --git a/vendor/github.com/openSUSE/umoci/.lgtm b/vendor/github.com/openSUSE/umoci/.lgtm new file mode 
100644 index 0000000000..a091606326 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/.lgtm @@ -0,0 +1,2 @@ +approvals = 1 +pattern = "^LGTM" diff --git a/vendor/github.com/openSUSE/umoci/.travis.yml b/vendor/github.com/openSUSE/umoci/.travis.yml new file mode 100644 index 0000000000..fe5f1add1e --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/.travis.yml @@ -0,0 +1,36 @@ +language: go + +# `make ci` uses Docker. +sudo: required +services: + - docker + +go: + - 1.x + +before_install: + - sudo sh -c "apt-get -qq update && apt-get install -y gcc-multilib" + - go get -u github.com/cpuguy83/go-md2man + - go get -u github.com/vbatts/git-validation + - go get -u golang.org/x/lint/golint + - go get -u github.com/securego/gosec/cmd/gosec + +env: + - DOCKER_IMAGE="opensuse/leap:latest" + - DOCKER_IMAGE="centos:latest" + - DOCKER_IMAGE="debian:latest" + - DOCKER_IMAGE="ubuntu:latest" + - DOCKER_IMAGE="fedora:latest" + +matrix: + fast_finish: true + allow_failures: + - env: DOCKER_IMAGE="fedora:latest" + +notifications: + email: false + +script: + - chmod a+rwx . # Necessary to make Travis co-operate with Docker. + - make GOARCH=386 local-validate-build # Make sure 32-bit builds work. + - make DOCKER_IMAGE=$DOCKER_IMAGE ci diff --git a/vendor/github.com/openSUSE/umoci/CHANGELOG.md b/vendor/github.com/openSUSE/umoci/CHANGELOG.md new file mode 100644 index 0000000000..66a2fae55d --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/CHANGELOG.md @@ -0,0 +1,404 @@ +# Change Log +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). + +## [Unreleased] + +## [0.4.4] - 2019-01-30 +## Added +- Full-stack verification of blob hashes and descriptor sizes is now done on + all operations, improving our hardening against bad blobs (we already did + some verification of layer DiffIDs but this is far more thorough). 
+ openSUSE/umoci#278 openSUSE/umoci#280 openSUSE/umoci#282 + +## [0.4.3] - 2018-11-11 +## Added +- All umoci commands that had `--history.*` options can now decide to omit a + history entry with `--no-history`. Note that while this is supported for + commands that create layers (`umoci repack`, `umoci insert`, and `umoci raw + add-layer`) it is not recommended to use it for those commands since it can + cause other tools to become confused when inspecting the image history. The + primary usecase is to allow `umoci config --no-history` to leave no traces in + the history. See SUSE/kiwi#871. openSUSE/umoci#270 +- `umoci insert` now has a `--tag` option that allows you to non-destructively + insert files into an image. The semantics match `umoci config --tag`. + openSUSE/umoci#273 + +## [0.4.2] - 2018-09-11 +## Added +- umoci now has an exposed Go API. At the moment it's unclear whether it will + be changed significantly, but at the least now users can use + umoci-as-a-library in a fairly sane way. openSUSE/umoci#245 +- Added `umoci unpack --keep-dirlinks` (in the same vein as rsync's flag with + the same name) which allows layers that contain entries which have a symlink + as a path component. openSUSE/umoci#246 +- `umoci insert` now supports whiteouts in two significant ways. You can use + `--whiteout` to "insert" a deletion of a given path, while you can use + `--opaque` to replace a directory by adding an opaque whiteout (the default + behaviour causes the old and new directories to be merged). + openSUSE/umoci#257 + +## Fixed +- Docker has changed how they handle whiteouts for non-existent files. The + specification is loose on this (and in umoci we've always been liberal with + whiteout generation -- to avoid cases where someone was confused we didn't + have a whiteout for every entry). But now that they have deviated from the + spec, in the interest of playing nice, we can just follow their new + restriction (even though it is not supported by the spec). 
This also makes + our layers *slightly* smaller. openSUSE/umoci#254 +- `umoci unpack` now no longer erases `system.nfs4_acl` and also has some more + sophisticated handling of forbidden xattrs. openSUSE/umoci#252 + openSUSE/umoci#248 +- `umoci unpack` now appears to work correctly on SELinux-enabled systems + (previously we had various issues where `umoci` wouldn't like it when it was + trying to ensure the filesystem was reproducibly generated and SELinux xattrs + would act strangely). To fix this, now `umoci unpack` will only cause errors + if it has been asked to change a forbidden xattr to a value different than + its current on-disk value. openSUSE/umoci#235 openSUSE/umoci#259 + +## [0.4.1] - 2018-08-16 +### Added +- The number of possible tags that are now valid with `umoci` subcommands has + increased significantly due to an expansion in the specification of the + format of the `ref.name` annotation. To quote the specification, the + following is the EBNF of valid `refname` values. openSUSE/umoci#234 + ``` + refname ::= component ("/" component)* + component ::= alphanum (separator alphanum)* + alphanum ::= [A-Za-z0-9]+ + separator ::= [-._:@+] | "--" + ``` +- A new `umoci insert` subcommand which adds a given file to a path inside the + container. openSUSE/umoci#237 +- A new `umoci raw unpack` subcommand in order to allow users to unpack images + without needing a configuration or any of the manifest generation. + openSUSE/umoci#239 +- `umoci` now has a logo. Thanks to [Max Bailey][maxbailey] for contributing + this to the project. openSUSE/umoci#165 openSUSE/umoci#249 + +### Fixed +- `umoci unpack` now handles out-of-order regular whiteouts correctly (though + this ordering is not recommended by the spec -- nor is it required). This is + an extension of openSUSE/umoci#229 that was missed during review. + openSUSE/umoci#232 +- `umoci unpack` and `umoci repack` now make use of a far more optimised `gzip` + compression library. 
In some benchmarks this has resulted in `umoci repack` + speedups of up to 3x (though of course, you should do your own benchmarks). + `umoci unpack` unfortunately doesn't have as significant of a performance + improvement, due to the nature of `gzip` decompression (in future we may + switch to `zlib` wrappers). openSUSE/umoci#225 openSUSE/umoci#233 + +[maxbailey]: http://www.maxbailey.me/ + +## [0.4.0] - 2018-03-10 +### Added +- `umoci repack` now supports `--refresh-bundle` which will update the + OCI bundle's metadata (mtree and umoci-specific manifests) after packing the + image tag. This means that the bundle can be used as a base layer for + future diffs without needing to unpack the image again. openSUSE/umoci#196 +- Added a website, and reworked the documentation to be better structured. You + can visit the website at [`umo.ci`][umo.ci]. openSUSE/umoci#188 +- Added support for the `user.rootlesscontainers` specification, which allows + for persistent on-disk emulation of `chown(2)` inside rootless containers. + This implementation is interoperable with [@AkihiroSuda's `PRoot` + fork][as-proot-fork] (though we do not test its interoperability at the + moment) as both tools use [the same protobuf + specification][rootlesscontainers-proto]. openSUSE/umoci#227 +- `umoci unpack` now has support for opaque whiteouts (whiteouts which remove + all children of a directory in the lower layer), though `umoci repack` does + not currently have support for generating them. While this is technically a + spec requirement, through testing we've never encountered an actual user of + these whiteouts. openSUSE/umoci#224 openSUSE/umoci#229 +- `umoci unpack` will now use some rootless tricks inside user namespaces for + operations that are known to fail (such as `mknod(2)`) while other operations + will be carried out as normal (such as `lchown(2)`). 
It should be noted that + the `/proc/self/uid_map` checking we do can be tricked into not detecting + user namespaces, but you would need to be trying to break it on purpose. + openSUSE/umoci#171 openSUSE/umoci#230 + +### Fixed +- Fix a bug in our "parent directory restore" code, which is responsible for + ensuring that the mtime and other similar properties of a directory are not + modified by extraction inside said directory. The bug would manifest as + xattrs not being restored properly in certain edge-cases (which we + incidentally hit in a test-case). openSUSE/umoci#161 openSUSE/umoci#162 +- `umoci unpack` will now "clean up" the bundle generated if an error occurs + during unpacking. Previously this didn't happen, which made cleaning up the + responsibility of the caller (which was quite difficult if you were + unprivileged). This is a breaking change, but is in the error path so it's + not critical. openSUSE/umoci#174 openSUSE/umoci#187 +- `umoci gc` now will no longer remove unknown files and directories that + aren't `flock(2)`ed, thus ensuring that any possible OCI image-spec + extensions or other users of an image being operated on will no longer + break. openSUSE/umoci#198 +- `umoci unpack --rootless` will now correctly handle regular file unpacking + when overwriting a file that `umoci` doesn't have write access to. In + addition, the semantics of pre-existing hardlinks to a clobbered file are + clarified (the hard-links will not refer to the new layer's inode). + openSUSE/umoci#222 openSUSE/umoci#223 + +[as-proot-fork]: https://github.com/AkihiroSuda/runrootless +[rootlesscontainers-proto]: https://rootlesscontaine.rs/proto/rootlesscontainers.proto +[umo.ci]: https://umo.ci/ + +## [0.3.1] - 2017-10-04 +### Fixed +- Fix several minor bugs in `hack/release.sh` that caused the release artefacts + to not match the intended style, as well as making it more generic so other + projects can use it. 
openSUSE/umoci#155 openSUSE/umoci#163 +- A recent configuration issue caused `go vet` and `go lint` to not run as part + of our CI jobs. This means that some of the information submitted as part of + [CII best practices badging][cii] was not accurate. This has been corrected, + and after review we concluded that only stylistic issues were discovered by + static analysis. openSUSE/umoci#158 +- 32-bit unit test builds were broken in a refactor in [0.3.0]. This has been + fixed, and we've added tests to our CI to ensure that something like this + won't go unnoticed in the future. openSUSE/umoci#157 +- `umoci unpack` would not correctly preserve set{uid,gid} bits. While this + would not cause issues when building an image (as we only create a manifest + of the final extracted rootfs), it would cause issues for other users of + `umoci`. openSUSE/umoci#166 openSUSE/umoci#169 +- Updated to [v0.4.1 of `go-mtree`][gomtree-v0.4.1], which fixes several minor + bugs with manifest generation. openSUSE/umoci#176 +- `umoci unpack` would not handle "weird" tar archive layers previously (it + would error out with DiffID errors). While this wouldn't cause issues for + layers generated using Go's `archive/tar` implementation, it would cause + issues for GNU gzip and other such tools. openSUSE/umoci#178 + openSUSE/umoci#179 + +### Changed +- `umoci unpack`'s mapping options (`--uid-map` and `--gid-map`) have had an + interface change, to better match the [`user_namespaces(7)`][user_namespaces] + interfaces. Note that this is a **breaking change**, but the workaround is to + switch to the trivially different (but now more consistent) format. + openSUSE/umoci#167 + +### Security +- `umoci unpack` used to create the bundle and rootfs with world + read-and-execute permissions by default. This could potentially result in an + unsafe rootfs (containing dangerous setuid binaries for instance) being + accessible by an unprivileged user. 
This has been fixed by always setting the + mode of the bundle to `0700`, which requires a user to explicitly work around + this basic protection. This scenario was documented in our security + documentation previously, but has now been fixed. openSUSE/umoci#181 + openSUSE/umoci#182 + +[cii]: https://bestpractices.coreinfrastructure.org/projects/1084 +[gomtree-v0.4.1]: https://github.com/vbatts/go-mtree/releases/tag/v0.4.1 +[user_namespaces]: http://man7.org/linux/man-pages/man7/user_namespaces.7.html + +## [0.3.0] - 2017-07-20 +### Added +- `umoci` now passes all of the requirements for the [CII best practices badging + program][cii]. openSUSE/umoci#134 +- `umoci` also now has more extensive architecture, quick-start and roadmap + documentation. openSUSE/umoci#134 +- `umoci` now supports [`1.0.0` of the OCI image + specification][ispec-v1.0.0] and [`1.0.0` of the OCI runtime + specification][rspec-v1.0.0], which are the first milestone release. Note + that there are still some remaining UX issues with `--image` and other parts + of `umoci` which may be subject to change in future versions. In particular, + this update of the specification now means that images may have ambiguous + tags. `umoci` will warn you if an operation may have an ambiguous result, but + we plan to improve this functionality far more in the future. + openSUSE/umoci#133 openSUSE/umoci#142 +- `umoci` also now supports more complicated descriptor walk structures, and + also handles mutation of such structures more sanely. At the moment, this + functionality has not been used "in the wild" and `umoci` doesn't have the UX + to create such structures (yet) but these will be implemented in future + versions. openSUSE/umoci#145 +- `umoci repack` now supports `--mask-path` to ignore changes in the rootfs + that are in a child of at least one of the provided masks when generating new + layers. 
openSUSE/umoci#127 + +### Changed +- Error messages from `github.com/openSUSE/umoci/oci/cas/drivers/dir` actually + make sense now. openSUSE/umoci#121 +- `umoci unpack` now generates `config.json` blobs according to the [still + proposed][ispec-pr492] OCI image specification conversion document. + openSUSE/umoci#120 +- `umoci repack` also now automatically adding `Config.Volumes` from the image + configuration to the set of masked paths. This matches recently added + [recommendations by the spec][ispec-pr694], but is a backwards-incompatible + change because the new default is that `Config.Volumes` **will** be masked. + If you wish to retain the old semantics, use `--no-mask-volumes` (though make + sure to be aware of the reasoning behind `Config.Volume` masking). + openSUSE/umoci#127 +- `umoci` now uses [`SecureJoin`][securejoin] rather than a patched version of + `FollowSymlinkInScope`. The two implementations are roughly equivalent, but + `SecureJoin` has a nicer API and is maintained as a separate project. +- Switched to using `golang.org/x/sys/unix` over `syscall` where possible, + which makes the codebase significantly cleaner. openSUSE/umoci#141 + +[cii]: https://bestpractices.coreinfrastructure.org/projects/1084 +[rspec-v1.0.0]: https://github.com/opencontainers/runtime-spec/releases/tag/v1.0.0 +[ispec-v1.0.0]: https://github.com/opencontainers/image-spec/releases/tag/v1.0.0 +[ispec-pr492]: https://github.com/opencontainers/image-spec/pull/492 +[ispec-pr694]: https://github.com/opencontainers/image-spec/pull/694 +[securejoin]: https://github.com/cyphar/filepath-securejoin + +## [0.2.1] - 2017-04-12 +### Added +- `hack/release.sh` automates the process of generating all of the published + artefacts for releases. The new script also generates signed source code + archives. openSUSE/umoci#116 + +### Changed +- `umoci` now outputs configurations that are compliant with [`v1.0.0-rc5` of + the OCI runtime-spec][rspec-v1.0.0-rc5]. 
This means that now you can use runc + v1.0.0-rc3 with `umoci` (and rootless containers should work out of the box + if you use a development build of runc). openSUSE/umoci#114 +- `umoci unpack` no longer adds a dummy linux.seccomp entry, and instead just + sets it to null. openSUSE/umoci#114 + +[rspec-v1.0.0-rc5]: https://github.com/opencontainers/runtime-spec/releases/tag/v1.0.0-rc5 + +## [0.2.0] - 2017-04-11 +### Added +- `umoci` now has some automated scripts for generated RPMs that are used in + openSUSE to automatically submit packages to OBS. openSUSE/umoci#101 +- `--clear=config.{cmd,entrypoint}` is now supported. While this interface is a + bit weird (`cmd` and `entrypoint` aren't treated atomically) this makes the + UX more consistent while we come up with a better `cmd` and `entrypoint` UX. + openSUSE/umoci#107 +- New subcommand: `umoci raw runtime-config`. It generates the runtime-spec + config.json for a particular image without also unpacking the root + filesystem, allowing for users of `umoci` that are regularly parsing + `config.json` without caring about the root filesystem to be more efficient. + However, a downside of this approach is that some image-spec fields + (`Config.User`) require a root filesystem in order to make sense, which is + why this command is hidden under the `umoci-raw(1)` subcommand (to make sure + only users that understand what they're doing use it). openSUSE/umoci#110 + +### Changed +- `umoci`'s `oci/cas` and `oci/config` libraries have been massively refactored + and rewritten, to allow for third-parties to use the OCI libraries. The plan + is for these to eventually become part of an OCI project. openSUSE/umoci#90 +- The `oci/cas` interface has been modifed to switch from `*ispec.Descriptor` + to `ispec.Descriptor`. This is a breaking, but fairly insignificant, change. + openSUSE/umoci#89 + +### Fixed +- `umoci` now uses an updated version of `go-mtree`, which has a complete + rewrite of `Vis` and `Unvis`. 
The rewrite ensures that unicode handling is + handled in a far more consistent and sane way. openSUSE/umoci#88 +- `umoci` used to set `process.user.additionalGids` to the "normal value" when + unpacking an image in rootless mode, causing issues when trying to actually + run said bundle with runC. openSUSE/umoci#109 + +## [0.1.0] - 2017-02-11 +### Added +- `CHANGELOG.md` has now been added. openSUSE/umoci#76 + +### Changed +- `umoci` now supports `v1.0.0-rc4` images, which has made fairly minimal + changes to the schema (mainly related to `mediaType`s). While this change + **is** backwards compatible (several fields were removed from the schema, but + the specification allows for "additional fields"), tools using older versions + of the specification may fail to operate on newer OCI images. There was no UX + change associated with this update. + +### Fixed +- `umoci tag` would fail to clobber existing tags, which was in contrast to how + the rest of the tag clobbering commands operated. This has been fixed and is + now consistent with the other commands. openSUSE/umoci#78 +- `umoci repack` now can correctly handle unicode-encoded filenames, allowing + the creation of containers that have oddly named files. This required fixes + to go-mtree (where the issue was). openSUSE/umoci#80 + +## [0.0.0] - 2017-02-07 +### Added +- Unit tests are massively expanded, as well as the integration tests. + openSUSE/umoci#68 openSUSE/umoci#69 +- Full coverage profiles (unit+integration) are generated to get all + information about how much code is tested. openSUSE/umoci#68 + openSUSE/umoci#69 + +### Fixed +- Static compilation now works properly. openSUSE/umoci#64 +- 32-bit architecture builds are fixed. openSUSE/umoci#70 + +### Changed +- Unit tests can now be run inside `%check` of an `rpmbuild` script, allowing + for proper testing. openSUSE/umoci#65. +- The logging output has been cleaned up to be much nicer for end-users to + read. 
openSUSE/umoci#73 +- Project has been moved to an openSUSE project. openSUSE/umoci#75 + +## [0.0.0-rc3] - 2016-12-19 +### Added +- `unpack`, `repack`: `xattr` support which also handles `security.selinux.*` + difficulties. openSUSE/umoci#49 openSUSE/umoci#52 +- `config`, `unpack`: Ensure that environment variables are not duplicated in + the extracted or stored configurations. openSUSE/umoci#30 +- Add support for read-only CAS operations for read-only filesystems. + openSUSE/umoci#47 +- Add some helpful output about `--rootless` if `umoci` fails with `EPERM`. +- Enable stack traces with errors if the `--debug` flag was given to `umoci`. + This requires a patch to `pkg/errors`. + +### Changed +- `gc`: Garbage collection now also garbage collects temporary directories. + openSUSE/umoci#17 +- Clean-ups to vendoring of `go-mtree` so that it's much more + upstream-friendly. + +## [0.0.0-rc2] - 2016-12-12 +### Added +- `unpack`, `repack`: Support for rootless unpacking and repacking. + openSUSE/umoci#26 +- `unpack`, `repack`: UID and GID mapping when unpacking and repacking. + openSUSE/umoci#26 +- `tag`, `rm`, `ls`: Tag modification commands such as `umoci tag`, `umoci rm` + and `umoci ls`. openSUSE/umoci#6 openSUSE/umoci#27 +- `stat`: Output information about an image. Currently only shows the history + information. Only the **JSON** output is stable. openSUSE/umoci#38 +- `init`, `new`: New commands have been created to allow for image creation + from scratch. openSUSE/umoci#5 openSUSE/umoci#42 +- `gc`: Garbage collection of images. openSUSE/umoci#6 +- Full integration and unit testing, with OCI validation to ensure that we + always create valid images. openSUSE/umoci#12 + +### Changed +- `unpack`, `repack`: Create history entries automatically (with options to + modify the entries). openSUSE/umoci#36 +- `unpack`: Store information about its source to ensure consistency when doing + a `repack`. 
openSUSE/umoci#14 +- The `--image` and `--from` arguments have been combined into a single + `[:]` argument for `--image`. openSUSE/umoci#39 +- `unpack`: Configuration annotations are now extracted, though there are still + some discussions happening upstream about the correct way of doing this. + openSUSE/umoci#43 + +### Fixed +- `repack`: Errors encountered during generation of delta layers are now + correctly propagated. openSUSE/umoci#33 +- `unpack`: Hardlinks are now extracted as real hardlinks. openSUSE/umoci#25 + +### Security +- `unpack`, `repack`: Symlinks are now correctly resolved inside the unpacked + rootfs. openSUSE/umoci#27 + +## 0.0.0-rc1 - 2016-11-10 +### Added +- Proof of concept with major functionality implemented. + + `unpack` + + `repack` + + `config` + +[Unreleased]: https://github.com/openSUSE/umoci/compare/v0.4.4...HEAD +[0.4.4]: https://github.com/openSUSE/umoci/compare/v0.4.3...v0.4.4 +[0.4.3]: https://github.com/openSUSE/umoci/compare/v0.4.2...v0.4.3 +[0.4.2]: https://github.com/openSUSE/umoci/compare/v0.4.1...v0.4.2 +[0.4.1]: https://github.com/openSUSE/umoci/compare/v0.4.0...v0.4.1 +[0.4.0]: https://github.com/openSUSE/umoci/compare/v0.3.1...v0.4.0 +[0.3.1]: https://github.com/openSUSE/umoci/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/openSUSE/umoci/compare/v0.2.1...v0.3.0 +[0.2.1]: https://github.com/openSUSE/umoci/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/openSUSE/umoci/compare/v0.1.0...v0.2.0 +[0.1.0]: https://github.com/openSUSE/umoci/compare/v0.0.0...v0.1.0 +[0.0.0]: https://github.com/openSUSE/umoci/compare/v0.0.0-rc3...v0.0.0 +[0.0.0-rc3]: https://github.com/openSUSE/umoci/compare/v0.0.0-rc2...v0.0.0-rc3 +[0.0.0-rc2]: https://github.com/openSUSE/umoci/compare/v0.0.0-rc1...v0.0.0-rc2 diff --git a/vendor/github.com/openSUSE/umoci/CODE_OF_CONDUCT.md b/vendor/github.com/openSUSE/umoci/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..3e513317ed --- /dev/null +++ 
b/vendor/github.com/openSUSE/umoci/CODE_OF_CONDUCT.md @@ -0,0 +1,30 @@ +## Code of Conduct ## + +As umoci is part of the openSUSE project, we adopt the openSUSE Code of +Conduct. This is enshrined in the [openSUSE Guiding +Principles][openSUSE-principles], in particular: + +> [We value] respect for other persons and their contributions, for other +> opinions and beliefs. We listen to arguments and address problems in a +> constructive and open way. We believe that a diverse community based on +> mutual respect is the base for a creative and productive environment enabling +> the project to be truly successful. We don't tolerate social discrimination +> and aim at creating an environment where people feel accepted and safe from +> offense. + +In case of a violation of these principles (or any reasonable interpretation +of the above principles), you may report it [to the maintainers](/MAINTAINERS). +If the maintainers are unable to handle this situation (or one of the +maintainers is responsible for a code of conduct violation), then the issue can +be escalated to the [openSUSE Board][openSUSE-board] who will be able to deal +with the violation. + +Both the maintainers and the Board assume all necessary rights to handle +violations in a reasonable and proportionate manner. However, violations that +happen outside of the openSUSE project and community *may* be decided to be +outside the jurisdiction of this Code of Conduct (depending on the +circumstances -- and, as above, the Board can be escalated to in case of +disagreement). 
+ +[openSUSE-principles]: https://en.opensuse.org/openSUSE:Guiding_principles +[openSUSE-board]: https://en.opensuse.org/openSUSE:Board diff --git a/vendor/github.com/openSUSE/umoci/CONTRIBUTING.md b/vendor/github.com/openSUSE/umoci/CONTRIBUTING.md new file mode 100644 index 0000000000..6d24dbeaf9 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/CONTRIBUTING.md @@ -0,0 +1,145 @@ +## Contribution Guidelines ## + +If you're reading this, you're likely interested in contributing to this +project. That's great! The intention of this document is to describe the basic +requirements and rules-of-thumb for contributions. + +### Security Issues ### + +If you are reporting a security issue, do not create an issue or file a pull +request on GitHub. Instead, disclose the issue responsibly by sending an email +to . If you feel it is necessary you may also encrypt +your email with [Pretty Good Privacy (PGP)][pgp] using the PGP key +[`6FA1B3E3F9A18CDCBE6A2CF54A7BE7BF70DE9B9F`][pgp-key]. *In future, the above +email will be replaced with a mailing list as part of our ongoing effort to +reduce the bus factor of this project.* + +[pgp]: https://en.wikipedia.org/wiki/Pretty_Good_Privacy +[pgp-key]: http://pgp.mit.edu/pks/lookup?op=vindex&search=0x6FA1B3E3F9A18CDCBE6A2CF54A7BE7BF70DE9B9F + +### Issues ### + +If you have found a bug in this project or have a question, first make sure +that the issue you are facing has not already been reported by another user. If +the issue you are facing has already been reported and you have more +information to provide, feel free to add a follow-up comment (but avoid adding +"me too" style comments as it distracts from discussion). If you couldn't find +an existing report for your issue, feel free to [open a new issue][issue-new]. +If you do not wish to use proprietary software to submit an issue, you may send +an email to and I will submit an issue on your +behalf. 
When reporting an issue, please provide the following information (to the best +of your ability) so we can debug your issue far more easily: + +* The version of this project you are using. If you are not using the latest + version of this project, please try to reproduce your issue on the latest + version. + +* A (short) description of what you are trying to accomplish so as to avoid the + [XY problem][xy-problem]. + +* A minimal example of the bug with a contrast between what you expect to + happen versus what actually happened. + +[issue-new]: https://github.com/openSUSE/umoci/issues/new +[xy-problem]: http://xyproblem.info/ + +### Submitting Changes ### + +In order to submit a change, you may [create a pull request][pr-new]. If you +do not wish to use proprietary software to submit a pull request, you may send +an email to and I will submit a pull request on your +behalf. + +All changes should be based off the latest commit of the master branch of this +project. In order for a change to be merged into this project, it must fulfil +all of the following requirements (note that many of these only apply for major +changes): + +* All changes must pass the automated testing and continuous integration. This + means they must build successfully without errors, must not produce errors + from static analysis and must not break existing functionality. You can run + all of these tests on your local machine if you wish by reading through + `.travis.yml` and running the listed commands. + +* All changes must be formatted using the Go style conventions, which ensures + that code remains consistent. You can automatically format your code in any + given `file.go` using `gofmt -s -w file.go`. 
+
+* Any significant changes (such as those that implement a feature or fix a bug)
+  must include an entry in the top-level [`CHANGELOG.md`][changelog] (see the
+  file for more details) that describes the change and links to the pull
+  request that implemented it (as well as issues that are being resolved).
+
+* Any feature change or bug fix should include one or more corresponding test
+  cases to ensure that the code is operating as intended. Significant features
+  warrant the addition of significant numbers of both integration and unit
+  tests.
+
+* Any feature change should include a corresponding change to the project
+  documentation describing the feature and how it should be used.
+
+If you miss any of the above things, don't worry, we'll remind you and provide
+help if you need any. In addition to the above requirements, your code will be
+reviewed by the maintainer(s) of this project, using the looks-good-to-me
+system (LGTM). All patches must have the approval of at least two maintainers
+that did not author a change before they are merged (the only exception to this
+is related to the approval of security patches -- which must be approved in
+private instead -- and cases where there are not enough maintainers to fulfil
+this requirement).
+
+Each commit should be self-contained and minimal (and should build and pass the
+tests individually), and commit messages should follow the Linux kernel style
+of commit messages. For more information see [§ 2 and 3 of
+`submitting-patches.rst` from the Linux kernel source][lk-commit].
+
+In addition, all commits must include a `Signed-off-by:` line in their
+description. This indicates that you certify [the following statement, known as
+the Developer Certificate of Origin][dco]. You can automatically add this line
+to your commits by using `git commit -s --amend`.
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +[pr-new]: https://github.com/openSUSE/umoci/compare +[changelog]: /CHANGELOG.md +[lk-commit]: https://www.kernel.org/doc/Documentation/process/submitting-patches.rst +[dco]: https://developercertificate.org/ diff --git a/vendor/go4.org/LICENSE b/vendor/github.com/openSUSE/umoci/COPYING similarity index 99% rename from vendor/go4.org/LICENSE rename to vendor/github.com/openSUSE/umoci/COPYING index 8f71f43fee..d645695673 100644 --- a/vendor/go4.org/LICENSE +++ b/vendor/github.com/openSUSE/umoci/COPYING @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +179,7 @@ APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -199,4 +200,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/vendor/github.com/openSUSE/umoci/Dockerfile b/vendor/github.com/openSUSE/umoci/Dockerfile new file mode 100644 index 0000000000..6a9e22b273 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/Dockerfile @@ -0,0 +1,52 @@ +# umoci: Umoci Modifies Open Containers' Images +# Copyright (C) 2016, 2017, 2018 SUSE LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM opensuse/leap:latest +MAINTAINER "Aleksa Sarai " + +# We have to use out-of-tree repos because several packages haven't been merged +# into openSUSE:Factory yet. 
+RUN zypper ar -f -p 10 -g obs://Virtualization:containers obs-vc && \ + zypper --gpg-auto-import-keys -n ref && \ + zypper -n up +RUN zypper -n in \ + bats \ + git \ + "go>=1.11" \ + golang-github-cpuguy83-go-md2man \ + go-mtree \ + jq \ + libcap-progs \ + make \ + moreutils \ + oci-image-tools \ + oci-runtime-tools \ + python-setuptools python-xattr attr \ + skopeo \ + tar + +ENV GOPATH /go +ENV PATH $GOPATH/bin:$PATH +RUN go get -u golang.org/x/lint/golint && \ + go get -u github.com/vbatts/git-validation && \ + go get -u github.com/securego/gosec/cmd/gosec + +ENV SOURCE_IMAGE=/opensuse SOURCE_TAG=latest +ARG DOCKER_IMAGE=opensuse/amd64:tumbleweed +RUN skopeo copy docker://$DOCKER_IMAGE oci:$SOURCE_IMAGE:$SOURCE_TAG + +VOLUME ["/go/src/github.com/openSUSE/umoci"] +WORKDIR /go/src/github.com/openSUSE/umoci +COPY . /go/src/github.com/openSUSE/umoci diff --git a/vendor/github.com/openSUSE/umoci/GOVERNANCE.md b/vendor/github.com/openSUSE/umoci/GOVERNANCE.md new file mode 100644 index 0000000000..6229df2b16 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/GOVERNANCE.md @@ -0,0 +1,18 @@ +## Governance Model ## + +umoci currently operates under a BDFL (Benevolent Dictator for Life) model. All +final decisions are made by the project owner (who is currently Aleksa Sarai). + +When more maintainers join the project, the governance model will likely change +to some kind of consensus model -- but until such a change to the governance +model is made (and accepted by the BDFL), the BDFL will remain unchanged. + +As an openSUSE project, there is an "escape hatch" governance in the form of +the [openSUSE Board][openSUSE-board]. In incredibly rare (and extreme) cases +where a decision about this project is asked of the openSUSE Board, they can +override decisions made by the BDFL (with the understanding that the openSUSE +Board's main goal is to reduce friction between parties, not to act as a +general appellate court). 
This "escape hatch" is invalidated if umoci ceases to +be an openSUSE project. + +[openSUSE-board]: https://en.opensuse.org/openSUSE:Board diff --git a/vendor/github.com/openSUSE/umoci/MAINTAINERS b/vendor/github.com/openSUSE/umoci/MAINTAINERS new file mode 100644 index 0000000000..1241335d92 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/MAINTAINERS @@ -0,0 +1 @@ +Aleksa Sarai (@cyphar) diff --git a/vendor/github.com/openSUSE/umoci/Makefile b/vendor/github.com/openSUSE/umoci/Makefile new file mode 100644 index 0000000000..0e516af419 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/Makefile @@ -0,0 +1,192 @@ +# umoci: Umoci Modifies Open Containers' Images +# Copyright (C) 2016, 2017, 2018 SUSE LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Use bash, so that we can do process substitution. +SHELL = /bin/bash + +# Go tools. +GO ?= go +GO_MD2MAN ?= go-md2man +export GO111MODULE=off + +# Set up the ... lovely ... GOPATH hacks. +PROJECT := github.com/openSUSE/umoci +CMD := ${PROJECT}/cmd/umoci + +# We use Docker because Go is just horrific to deal with. +UMOCI_IMAGE := umoci_dev +DOCKER_RUN := docker run --rm -it --security-opt apparmor:unconfined --security-opt label:disable -v ${PWD}:/go/src/${PROJECT} + +# Output directory. +BUILD_DIR ?= . + +# Release information. +GPG_KEYID ?= + +# Version information. 
+VERSION := $(shell cat ./VERSION) +COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true) +COMMIT := $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}") + +# Basic build flags. +BUILD_FLAGS ?= +BASE_FLAGS := ${BUILD_FLAGS} -tags "${BUILDTAGS}" +BASE_LDFLAGS := -s -w -X main.gitCommit=${COMMIT} -X main.version=${VERSION} + +# Specific build flags for build type. +DYN_BUILD_FLAGS := ${BASE_FLAGS} -buildmode=pie -ldflags "${BASE_LDFLAGS}" +TEST_BUILD_FLAGS := ${BASE_FLAGS} -buildmode=pie -ldflags "${BASE_LDFLAGS} -X ${PROJECT}/pkg/testutils.binaryType=test" +STATIC_BUILD_FLAGS := ${BASE_FLAGS} -ldflags "${BASE_LDFLAGS} -extldflags '-static'" + +# Installation directories. +DESTDIR ?= +PREFIX ?=/usr +BINDIR ?=$(PREFIX)/bin +MANDIR ?=$(PREFIX)/share/man + +.DEFAULT: umoci + +GO_SRC = $(shell find . -name \*.go) + +# NOTE: If you change these make sure you also update local-validate-build. + +umoci: $(GO_SRC) + $(GO) build ${DYN_BUILD_FLAGS} -o $(BUILD_DIR)/$@ ${CMD} + +umoci.static: $(GO_SRC) + env CGO_ENABLED=0 $(GO) build ${STATIC_BUILD_FLAGS} -o $(BUILD_DIR)/$@ ${CMD} + +umoci.cover: $(GO_SRC) + $(GO) test -c -cover -covermode=count -coverpkg=./... 
${TEST_BUILD_FLAGS} -o $(BUILD_DIR)/$@ ${CMD} + +.PHONY: release +release: + hack/release.sh -S "$(GPG_KEYID)" -r release/$(VERSION) -v $(VERSION) + +.PHONY: install +install: umoci doc + install -D -m0755 umoci $(DESTDIR)/$(BINDIR)/umoci + -for man in $(MANPAGES); do \ + filename="$$(basename -- "$$man")"; \ + target="$(DESTDIR)/$(MANDIR)/man$${filename##*.}/$$filename"; \ + install -D -m0644 "$$man" "$$target"; \ + gzip -9f "$$target"; \ + done + +.PHONY: uninstall +uninstall: + rm -f $(DESTDIR)/$(BINDIR)/umoci + -rm -f $(DESTDIR)/$(MANDIR)/man*/umoci* + +.PHONY: clean +clean: + rm -f umoci umoci.static umoci.cov* + rm -f $(MANPAGES) + +.PHONY: validate +validate: umociimage + $(DOCKER_RUN) $(UMOCI_IMAGE) make local-validate + +.PHONY: local-validate +local-validate: local-validate-git local-validate-go local-validate-reproducible local-validate-build + +# TODO: Remove the special-case ignored system/* warnings. +.PHONY: local-validate-go +local-validate-go: + @type gofmt >/dev/null 2>/dev/null || (echo "ERROR: gofmt not found." && false) + test -z "$$(gofmt -s -l . | grep -vE '^vendor/|^third_party/' | tee /dev/stderr)" + @type golint >/dev/null 2>/dev/null || (echo "ERROR: golint not found." && false) + test -z "$$(golint $(PROJECT)/... | grep -vE '/vendor/|/third_party/' | tee /dev/stderr)" + @go doc cmd/vet >/dev/null 2>/dev/null || (echo "ERROR: go vet not found." && false) + test -z "$$($(GO) vet $$($(GO) list $(PROJECT)/... | grep -vE '/vendor/|/third_party/') 2>&1 | tee /dev/stderr)" + @type gosec >/dev/null 2>/dev/null || (echo "ERROR: gosec not found." && false) + test -z "$$(gosec -quiet -exclude=G301,G302,G304 $$GOPATH/$(PROJECT)/... | tee /dev/stderr)" + ./hack/test-vendor.sh + +EPOCH_COMMIT ?= 97ecdbd53dcb72b7a0d62196df281f131dc9eb2f +.PHONY: local-validate-git +local-validate-git: + @type git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found." 
&& false) +ifdef TRAVIS_COMMIT_RANGE + git-validation -q -run DCO,short-subject +else + git-validation -q -run DCO,short-subject -range $(EPOCH_COMMIT)..HEAD +endif + +# Make sure that our builds are reproducible even if you wait between them and +# the modified time of the files is different. +.PHONY: local-validate-reproducible +local-validate-reproducible: + mkdir -p .tmp-validate + make -B umoci && cp umoci .tmp-validate/umoci.a + @echo sleep 10s + @sleep 10s && touch $(GO_SRC) + make -B umoci && cp umoci .tmp-validate/umoci.b + diff -s .tmp-validate/umoci.{a,b} + sha256sum .tmp-validate/umoci.{a,b} + rm -r .tmp-validate/umoci.{a,b} + +.PHONY: local-validate-build +local-validate-build: + $(GO) build ${DYN_BUILD_FLAGS} -o /dev/null ${CMD} + env CGO_ENABLED=0 $(GO) build ${STATIC_BUILD_FLAGS} -o /dev/null ${CMD} + $(GO) test -run nothing ${DYN_BUILD_FLAGS} $(PROJECT)/... + +MANPAGES_MD := $(wildcard doc/man/*.md) +MANPAGES := $(MANPAGES_MD:%.md=%) + +doc/man/%.1: doc/man/%.1.md + $(GO_MD2MAN) -in $< -out $@ + +.PHONY: doc +doc: $(MANPAGES) + +# Used for tests. +DOCKER_IMAGE :=opensuse/amd64:tumbleweed + +.PHONY: umociimage +umociimage: + docker build -t $(UMOCI_IMAGE) --build-arg DOCKER_IMAGE=$(DOCKER_IMAGE) . 
+ +ifndef COVERAGE +COVERAGE := $(shell mktemp --dry-run umoci.cov.XXXXXX) +endif + +.PHONY: test-unit +test-unit: umociimage + touch $(COVERAGE) && chmod a+rw $(COVERAGE) + $(DOCKER_RUN) -e COVERAGE=$(COVERAGE) --cap-add=SYS_ADMIN $(UMOCI_IMAGE) make local-test-unit + $(DOCKER_RUN) -e COVERAGE=$(COVERAGE) -u 1000:1000 --cap-drop=all $(UMOCI_IMAGE) make local-test-unit + +.PHONY: local-test-unit +local-test-unit: + GO=$(GO) COVER=1 hack/test-unit.sh + +.PHONY: test-integration +test-integration: umociimage + touch $(COVERAGE) && chmod a+rw $(COVERAGE) + $(DOCKER_RUN) -e COVERAGE=$(COVERAGE) $(UMOCI_IMAGE) make TESTS="${TESTS}" local-test-integration + $(DOCKER_RUN) -e COVERAGE=$(COVERAGE) -u 1000:1000 --cap-drop=all $(UMOCI_IMAGE) make TESTS="${TESTS}" local-test-integration + +.PHONY: local-test-integration +local-test-integration: umoci.cover + TESTS="${TESTS}" COVER=1 hack/test-integration.sh + +shell: umociimage + $(DOCKER_RUN) $(UMOCI_IMAGE) bash + +.PHONY: ci +ci: umoci umoci.cover doc local-validate test-unit test-integration + hack/ci-coverage.sh $(COVERAGE) diff --git a/vendor/github.com/openSUSE/umoci/README.md b/vendor/github.com/openSUSE/umoci/README.md new file mode 100644 index 0000000000..b37f3947b8 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/README.md @@ -0,0 +1,150 @@ +[![umoci](/contrib/logo/umoci-black.png)][umoci-site] + +[![Release](https://img.shields.io/github/release/openSUSE/umoci.svg)](https://github.com/openSUSE/umoci/releases/latest) +[![Build Status](https://img.shields.io/travis/openSUSE/umoci/master.svg)](https://travis-ci.org/openSUSE/umoci) +![License: Apache 2.0](https://img.shields.io/github/license/openSUSE/umoci.svg) + +[![Go Report Card](https://goreportcard.com/badge/github.com/openSUSE/umoci)](https://goreportcard.com/report/github.com/openSUSE/umoci) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1084/badge)](https://bestpractices.coreinfrastructure.org/projects/1084) 
+[![DOI](https://zenodo.org/badge/72283469.svg)](https://zenodo.org/badge/latestdoi/72283469) + +**u**moci **m**odifies **O**pen **C**ontainer **i**mages. See [the official +website][umoci-site] for more accessible documentation about how to use umoci. + +umoci intends to be a complete manipulation tool for [OCI images][oci-image-spec]. +In particular, it should be seen as a more end-user-focused version of the +[`oci-image-tools` provided by the OCI][oci-image-tools]. The hope is that all +of this tooling will eventually be merged with the upstream repository, so that +it is always kept up-to-date by the Open Container Initiative community. + +However, currently there is a [lot][disc-1] [of][disc-2] [discussion][disc-3] +about the new tooling going into the OCI image tools, and right now I need +tooling that can abstract all of the internals of the OCI specification into a +single CLI interface. The main purpose of this tool is to serve as example of +what **I** would like to see in an `oci-image` tool. + +If you wish to provide feedback or contribute, read the +[`CONTRIBUTING.md`][contributing] for this project to refresh your knowledge +about how to submit good bug reports and patches. Information about how to +submit responsible security disclosures is also provided. + +[umoci-site]: https://umo.ci/ +[oci-image-spec]: https://github.com/opencontainers/image-spec +[oci-image-tools]: https://github.com/opencontainers/image-tools +[disc-1]: https://github.com/opencontainers/image-spec/pull/411 +[disc-2]: https://github.com/opencontainers/image-tools/pull/5 +[disc-3]: https://github.com/opencontainers/image-tools/pull/8 +[contributing]: /CONTRIBUTING.md + +### Releases ### + +We regularly publish [new releases][releases], with each release being given a +unique identifying version number (as governed by [Semantic Versioning +(SemVer)][semver]). 
Information about previous releases including the list of +new features, bug fixes and resolved security issues is available in the +[change log][changelog]. You can get pre-built binaries and corresponding +source code for each release from the [releases page][releases]. + +[semver]: http://semver.org/ +[changelog]: /CHANGELOG.md +[releases]: https://github.com/openSUSE/umoci/releases + +### Installation ### + +If you wish to build umoci from source, follow these steps to build in with +[golang](https://golang.org). + +```bash +GOPATH=$HOME +go get -d github.com/openSUSE/umoci +cd $GOPATH/src/github.com/openSUSE/umoci +make install +``` + +Your `umoci` binary will be in `$HOME/bin`. + +### Usage ### + +umoci has a subcommand-based command-line. For more detailed information, see +the generated man pages (which you can build with `make doc`). You can also +read through our [quick start guide][quickstart]. + +``` +% umoci --help +NAME: + umoci - umoci modifies Open Container images + +USAGE: + umoci [global options] command [command options] [arguments...] 
+ +VERSION: + 0.3.1 + +AUTHOR(S): + Aleksa Sarai + +COMMANDS: + raw advanced internal image tooling + help, h Shows a list of commands or help for one command + + image: + config modifies the image configuration of an OCI image + unpack unpacks a reference into an OCI runtime bundle + repack repacks an OCI runtime bundle into a reference + new creates a blank tagged OCI image + tag creates a new tag in an OCI image + remove, rm removes a tag from an OCI image + stat displays status information of an image manifest + + layout: + gc garbage-collects an OCI image's blobs + init create a new OCI layout + list, ls lists the set of tags in an OCI image + +GLOBAL OPTIONS: + --verbose alias for --log=info + --log value set the log level (debug, info, [warn], error, fatal) (default: "warn") + --help, -h show help + --version, -v print the version +``` + +[quickstart]: https://umo.ci/quick-start/ + +### License ### + +umoci is licensed under the terms of the Apache 2.0 license. + +``` +umoci: Umoci Modifies Open Containers' Images +Copyright (C) 2016, 2017, 2018 SUSE LLC. +Copyright (C) 2018 Cisco Systems + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` + +### Citation ### + +If you have used umoci in your research, please cite it like you would any +other useful software. Here is a handy BibTex citation. 
+ +``` +@misc{umoci, + title = {umoci - Standalone Tool For Manipulating Container Images}, + author = {Aleksa Sarai et al.}, + year = {2016}, + url = {https://umo.ci/}, + doi = {http://dx.doi.org/10.5281/zenodo.1188474}, +} +``` + +Thank you. diff --git a/vendor/github.com/openSUSE/umoci/VERSION b/vendor/github.com/openSUSE/umoci/VERSION new file mode 100644 index 0000000000..6f2743d65d --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/VERSION @@ -0,0 +1 @@ +0.4.4 diff --git a/vendor/github.com/openSUSE/umoci/api.go b/vendor/github.com/openSUSE/umoci/api.go new file mode 100644 index 0000000000..9ecac0a849 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/api.go @@ -0,0 +1,47 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2018 Cisco Systems + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package umoci + +import ( + "github.com/openSUSE/umoci/oci/cas/dir" + "github.com/openSUSE/umoci/oci/casext" + "github.com/pkg/errors" +) + +// OpenLayout opens an existing OCI image layout, and fails if it does not +// exist. +func OpenLayout(imagePath string) (casext.Engine, error) { + // Get a reference to the CAS. + engine, err := dir.Open(imagePath) + if err != nil { + return casext.Engine{}, errors.Wrap(err, "open CAS") + } + + return casext.NewEngine(engine), nil +} + +// CreateLayout creates an existing OCI image layout, and fails if it already +// exists. 
+func CreateLayout(imagePath string) (casext.Engine, error) { + err := dir.Create(imagePath) + if err != nil { + return casext.Engine{}, err + } + + return OpenLayout(imagePath) +} diff --git a/vendor/github.com/openSUSE/umoci/go.mod b/vendor/github.com/openSUSE/umoci/go.mod new file mode 100644 index 0000000000..56e2955487 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/go.mod @@ -0,0 +1,35 @@ +module github.com/openSUSE/umoci + +require ( + github.com/apex/log v1.1.0 + github.com/blang/semver v3.5.1+incompatible + github.com/cyphar/filepath-securejoin v0.2.2 + github.com/docker/go-units v0.3.3 + github.com/fatih/color v1.7.0 + github.com/golang/protobuf v1.2.0 + github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 + github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 + github.com/klauspost/compress v1.4.0 + github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 + github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 + github.com/klauspost/pgzip v0.0.0-20170402124221-0bf5dcad4ada + github.com/mattn/go-colorable v0.0.9 + github.com/mattn/go-isatty v0.0.3 + github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb + github.com/opencontainers/go-digest v1.0.0-rc1 + github.com/opencontainers/image-spec v1.0.0 + github.com/opencontainers/runtime-spec v1.0.0 + github.com/opencontainers/runtime-tools v0.7.0 + github.com/pkg/errors v0.8.0 + github.com/rootless-containers/proto v0.1.0 + github.com/sirupsen/logrus v1.0.6 + github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e + github.com/urfave/cli v1.20.0 + github.com/vbatts/go-mtree v0.4.3 + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 + github.com/xeipuuv/gojsonschema v0.0.0-20180719132039-b84684d0e066 + golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb + golang.org/x/net v0.0.0-20180801234040-f4c29de78a2a + golang.org/x/sys v0.0.0-20180801221139-3dc4335d56c7 
+) diff --git a/vendor/github.com/openSUSE/umoci/go.sum b/vendor/github.com/openSUSE/umoci/go.sum new file mode 100644 index 0000000000..7597e343c7 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/go.sum @@ -0,0 +1,62 @@ +github.com/apex/log v1.1.0 h1:J5rld6WVFi6NxA6m8GJ1LJqu3+GiTFIt3mYv27gdQWI= +github.com/apex/log v1.1.0/go.mod h1:yA770aXIDQrhVOIGurT/pVdfCpSq1GQV/auzMN5fzvY= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 h1:Rem2+U35z1QtPQc6r+WolF7yXiefXqDKyk+lN2pE164= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 h1:j30noezaCfvNLcdMYSvHLv81DxYRSt1grlpseG67vhU= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid 
v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v0.0.0-20170402124221-0bf5dcad4ada h1:ZHhgRyr+9LYwfuWChpSTCCe/07V26LEElTKUXj+2fAg= +github.com/klauspost/pgzip v0.0.0-20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb h1:e+l77LJOEqXTIQihQJVkA6ZxPOUmfPM5e4H7rcpgtSk= +github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.0 h1:jcw3cCH887bLKETGYpv8afogdYchbShR0eH6oD9d5PQ= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg= +github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.7.0 h1:MIjqgwi4ZC+eVNGiYotCUYuTfs/oWDEcigK9Ra5ruHU= +github.com/opencontainers/runtime-tools v0.7.0/go.mod 
h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/rootless-containers/proto v0.1.0 h1:gS1JOMEtk1YDYHCzBAf/url+olMJbac7MTrgSeP6zh4= +github.com/rootless-containers/proto v0.1.0/go.mod h1:vgkUFZbQd0gcE/K/ZwtE4MYjZPu0UNHLXIQxhyqAFh8= +github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e h1:QjF5rxNgRSLHJDwKUvfYP3qOx1vTDzUi/+oSC8FXnCI= +github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vbatts/go-mtree v0.4.3 h1:IC2s9EpogK3QzU+VsfuEdM7POkwnW43XDGAWO2Rb1Bo= +github.com/vbatts/go-mtree v0.4.3/go.mod h1:3sazBqLG4bZYmgRTgdh9X3iKTzwBpp5CrREJDzrNSXY= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180719132039-b84684d0e066 h1:iBmpEMJZETMKCupjL9Q7X3Q5utIRnWGbls0TXTgD7JI= +github.com/xeipuuv/gojsonschema v0.0.0-20180719132039-b84684d0e066/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb h1:Ah9YqXLj6fEgeKqcmBuLCbAsrF3ScD7dJ/bYM0C6tXI= 
+golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180801234040-f4c29de78a2a h1:8fCF9zjAir2SP3N+axz9xs+0r4V8dqPzqsWO10t8zoo= +golang.org/x/net v0.0.0-20180801234040-f4c29de78a2a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sys v0.0.0-20180801221139-3dc4335d56c7 h1:tr8zzCvQ7aQaYQ5nOInKN9jN9ujD92ktUtiEpN/b+tE= +golang.org/x/sys v0.0.0-20180801221139-3dc4335d56c7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/openSUSE/umoci/oci/cas/README.md b/vendor/github.com/openSUSE/umoci/oci/cas/README.md new file mode 100644 index 0000000000..7d493d5df9 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/cas/README.md @@ -0,0 +1,10 @@ +### `umoci/oci/cas` ### + +This is a reimplemented version of the currently in-flight [`image-tools` CAS +PR][cas-pr], which combines the `cas` and `refs` interfaces into a single +`Engine` that represents the image. In addition, I've implemented more +auto-detection and creature comforts. + +When the PR is merged, these changes will probably go upstream as well. + +[cas-pr]: https://github.com/opencontainers/image-tools/pull/5 diff --git a/vendor/github.com/openSUSE/umoci/oci/cas/cas.go b/vendor/github.com/openSUSE/umoci/oci/cas/cas.go new file mode 100644 index 0000000000..1c8ccce479 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/cas/cas.go @@ -0,0 +1,109 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cas + +import ( + "fmt" + "io" + + // We need to include sha256 in order for go-digest to properly handle such + // hashes, since Go's crypto library like to lazy-load cryptographic + // libraries. + _ "crypto/sha256" + + "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/net/context" +) + +const ( + // BlobAlgorithm is the name of the only supported digest algorithm for blobs. + // FIXME: We can make this a list. + BlobAlgorithm = digest.SHA256 +) + +// Exposed errors. +var ( + // ErrNotExist is effectively an implementation-neutral version of + // os.ErrNotExist. + ErrNotExist = fmt.Errorf("no such blob or index") + + // ErrInvalid is returned when an image was detected as being invalid. + ErrInvalid = fmt.Errorf("invalid image detected") + + // ErrUnknownType is returned when an unknown (or otherwise unparseable) + // mediatype is encountered. Callers should not ignore this error unless it + // is in a context where ignoring it is more friendly to spec extensions. + ErrUnknownType = fmt.Errorf("unknown mediatype encountered") + + // ErrNotImplemented is returned when a requested operation has not been + // implementing the backing image store. + ErrNotImplemented = fmt.Errorf("operation not implemented") + + // ErrClobber is returned when a requested operation would require clobbering a + // reference or blob which already exists. 
+ ErrClobber = fmt.Errorf("operation would clobber existing object") +) + +// Engine is an interface that provides methods for accessing and modifying an +// OCI image, namely allowing access to reference descriptors and blobs. +type Engine interface { + // PutBlob adds a new blob to the image. This is idempotent; a nil error + // means that "the content is stored at DIGEST" without implying "because + // of this PutBlob() call". + PutBlob(ctx context.Context, reader io.Reader) (digest digest.Digest, size int64, err error) + + // GetBlob returns a reader for retrieving a blob from the image, which the + // caller must Close(). Returns ErrNotExist if the digest is not found. + GetBlob(ctx context.Context, digest digest.Digest) (reader io.ReadCloser, err error) + + // PutIndex sets the index of the OCI image to the given index, replacing + // the previously existing index. This operation is atomic; any readers + // attempting to access the OCI image while it is being modified will only + // ever see the new or old index. + PutIndex(ctx context.Context, index ispec.Index) (err error) + + // GetIndex returns the index of the OCI image. Return ErrNotExist if the + // digest is not found. If the image doesn't have an index, ErrInvalid is + // returned (a valid OCI image MUST have an image index). + // + // It is not recommended that users of cas.Engine use this interface + // directly, due to the complication of properly handling references as + // well as correctly handling nested indexes. casext.Engine provides a + // wrapper for cas.Engine that implements various reference resolution + // functions that should work for most users. + GetIndex(ctx context.Context) (index ispec.Index, ierr error) + + // DeleteBlob removes a blob from the image. This is idempotent; a nil + // error means "the content is not in the store" without implying "because + // of this DeleteBlob() call". 
+ DeleteBlob(ctx context.Context, digest digest.Digest) (err error) + + // ListBlobs returns the set of blob digests stored in the image. + ListBlobs(ctx context.Context) (digests []digest.Digest, err error) + + // Clean executes a garbage collection of any non-blob garbage in the store + // (this includes temporary files and directories not reachable from the + // CAS interface). This MUST NOT remove any blobs or references in the + // store. + Clean(ctx context.Context) (err error) + + // Close releases all references held by the engine. Subsequent operations + // may fail. + Close() (err error) +} diff --git a/vendor/github.com/openSUSE/umoci/oci/cas/dir/dir.go b/vendor/github.com/openSUSE/umoci/oci/cas/dir/dir.go new file mode 100644 index 0000000000..7bcc9a5773 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/cas/dir/dir.go @@ -0,0 +1,436 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package dir + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/apex/log" + "github.com/openSUSE/umoci/oci/cas" + "github.com/openSUSE/umoci/pkg/hardening" + "github.com/opencontainers/go-digest" + imeta "github.com/opencontainers/image-spec/specs-go" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +const ( + // ImageLayoutVersion is the version of the image layout we support. This + // value is *not* the same as imagespec.Version, and the meaning of this + // field is still under discussion in the spec. For now we'll just hardcode + // the value and hope for the best. + ImageLayoutVersion = "1.0.0" + + // blobDirectory is the directory inside an OCI image that contains blobs. + blobDirectory = "blobs" + + // indexFile is the file inside an OCI image that contains the top-level + // index. + indexFile = "index.json" + + // layoutFile is the file in side an OCI image the indicates what version + // of the OCI spec the image is. + layoutFile = "oci-layout" +) + +// blobPath returns the path to a blob given its digest, relative to the root +// of the OCI image. The digest must be of the form algorithm:hex. 
+func blobPath(digest digest.Digest) (string, error) { + if err := digest.Validate(); err != nil { + return "", errors.Wrapf(err, "invalid digest: %q", digest) + } + + algo := digest.Algorithm() + hash := digest.Hex() + + if algo != cas.BlobAlgorithm { + return "", errors.Errorf("unsupported algorithm: %q", algo) + } + + return filepath.Join(blobDirectory, algo.String(), hash), nil +} + +type dirEngine struct { + path string + temp string + tempFile *os.File +} + +func (e *dirEngine) ensureTempDir() error { + if e.temp == "" { + tempDir, err := ioutil.TempDir(e.path, ".umoci-") + if err != nil { + return errors.Wrap(err, "create tempdir") + } + + // We get an advisory lock to ensure that GC() won't delete our + // temporary directory here. Once we get the lock we know it won't do + // anything until we unlock it or exit. + + e.tempFile, err = os.Open(tempDir) + if err != nil { + return errors.Wrap(err, "open tempdir for lock") + } + if err := unix.Flock(int(e.tempFile.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil { + return errors.Wrap(err, "lock tempdir") + } + + e.temp = tempDir + } + return nil +} + +// verify ensures that the image is valid. +func (e *dirEngine) validate() error { + content, err := ioutil.ReadFile(filepath.Join(e.path, layoutFile)) + if err != nil { + if os.IsNotExist(err) { + err = cas.ErrInvalid + } + return errors.Wrap(err, "read oci-layout") + } + + var ociLayout ispec.ImageLayout + if err := json.Unmarshal(content, &ociLayout); err != nil { + return errors.Wrap(err, "parse oci-layout") + } + + // XXX: Currently the meaning of this field is not adequately defined by + // the spec, nor is the "official" value determined by the spec. + if ociLayout.Version != ImageLayoutVersion { + return errors.Wrap(cas.ErrInvalid, "layout version is not supported") + } + + // Check that "blobs" and "index.json" exist in the image. 
+ // FIXME: We also should check that blobs *only* contains a cas.BlobAlgorithm + // directory (with no subdirectories) and that refs *only* contains + // files (optionally also making sure they're all JSON descriptors). + if fi, err := os.Stat(filepath.Join(e.path, blobDirectory)); err != nil { + if os.IsNotExist(err) { + err = cas.ErrInvalid + } + return errors.Wrap(err, "check blobdir") + } else if !fi.IsDir() { + return errors.Wrap(cas.ErrInvalid, "blobdir is not a directory") + } + + if fi, err := os.Stat(filepath.Join(e.path, indexFile)); err != nil { + if os.IsNotExist(err) { + err = cas.ErrInvalid + } + return errors.Wrap(err, "check index") + } else if fi.IsDir() { + return errors.Wrap(cas.ErrInvalid, "index is a directory") + } + + return nil +} + +// PutBlob adds a new blob to the image. This is idempotent; a nil error +// means that "the content is stored at DIGEST" without implying "because +// of this PutBlob() call". +func (e *dirEngine) PutBlob(ctx context.Context, reader io.Reader) (digest.Digest, int64, error) { + if err := e.ensureTempDir(); err != nil { + return "", -1, errors.Wrap(err, "ensure tempdir") + } + + digester := cas.BlobAlgorithm.Digester() + + // We copy this into a temporary file because we need to get the blob hash, + // but also to avoid half-writing an invalid blob. + fh, err := ioutil.TempFile(e.temp, "blob-") + if err != nil { + return "", -1, errors.Wrap(err, "create temporary blob") + } + tempPath := fh.Name() + defer fh.Close() + + writer := io.MultiWriter(fh, digester.Hash()) + size, err := io.Copy(writer, reader) + if err != nil { + return "", -1, errors.Wrap(err, "copy to temporary blob") + } + if err := fh.Close(); err != nil { + return "", -1, errors.Wrap(err, "close temporary blob") + } + + // Get the digest. + path, err := blobPath(digester.Digest()) + if err != nil { + return "", -1, errors.Wrap(err, "compute blob name") + } + + // Move the blob to its correct path. 
+ path = filepath.Join(e.path, path) + if err := os.Rename(tempPath, path); err != nil { + return "", -1, errors.Wrap(err, "rename temporary blob") + } + + return digester.Digest(), int64(size), nil +} + +// GetBlob returns a reader for retrieving a blob from the image, which the +// caller must Close(). Returns os.ErrNotExist if the digest is not found. +func (e *dirEngine) GetBlob(ctx context.Context, digest digest.Digest) (io.ReadCloser, error) { + path, err := blobPath(digest) + if err != nil { + return nil, errors.Wrap(err, "compute blob path") + } + fh, err := os.Open(filepath.Join(e.path, path)) + return &hardening.VerifiedReadCloser{ + Reader: fh, + ExpectedDigest: digest, + ExpectedSize: int64(-1), // We don't know the expected size. + }, errors.Wrap(err, "open blob") +} + +// PutIndex sets the index of the OCI image to the given index, replacing the +// previously existing index. This operation is atomic; any readers attempting +// to access the OCI image while it is being modified will only ever see the +// new or old index. +func (e *dirEngine) PutIndex(ctx context.Context, index ispec.Index) error { + if err := e.ensureTempDir(); err != nil { + return errors.Wrap(err, "ensure tempdir") + } + + // We copy this into a temporary index to ensure the atomicity of this + // operation. + fh, err := ioutil.TempFile(e.temp, "index-") + if err != nil { + return errors.Wrap(err, "create temporary index") + } + tempPath := fh.Name() + defer fh.Close() + + // Encode the index. + if err := json.NewEncoder(fh).Encode(index); err != nil { + return errors.Wrap(err, "write temporary index") + } + if err := fh.Close(); err != nil { + return errors.Wrap(err, "close temporary index") + } + + // Move the blob to its correct path. + path := filepath.Join(e.path, indexFile) + if err := os.Rename(tempPath, path); err != nil { + return errors.Wrap(err, "rename temporary index") + } + return nil +} + +// GetIndex returns the index of the OCI image. 
Return ErrNotExist if the +// digest is not found. If the image doesn't have an index, ErrInvalid is +// returned (a valid OCI image MUST have an image index). +// +// It is not recommended that users of cas.Engine use this interface directly, +// due to the complication of properly handling references as well as correctly +// handling nested indexes. casext.Engine provides a wrapper for cas.Engine +// that implements various reference resolution functions that should work for +// most users. +func (e *dirEngine) GetIndex(ctx context.Context) (ispec.Index, error) { + content, err := ioutil.ReadFile(filepath.Join(e.path, indexFile)) + if err != nil { + if os.IsNotExist(err) { + err = cas.ErrInvalid + } + return ispec.Index{}, errors.Wrap(err, "read index") + } + + var index ispec.Index + if err := json.Unmarshal(content, &index); err != nil { + return ispec.Index{}, errors.Wrap(err, "parse index") + } + + return index, nil +} + +// DeleteBlob removes a blob from the image. This is idempotent; a nil +// error means "the content is not in the store" without implying "because +// of this DeleteBlob() call". +func (e *dirEngine) DeleteBlob(ctx context.Context, digest digest.Digest) error { + path, err := blobPath(digest) + if err != nil { + return errors.Wrap(err, "compute blob path") + } + + err = os.Remove(filepath.Join(e.path, path)) + if err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "remove blob") + } + return nil +} + +// ListBlobs returns the set of blob digests stored in the image. +func (e *dirEngine) ListBlobs(ctx context.Context) ([]digest.Digest, error) { + digests := []digest.Digest{} + blobDir := filepath.Join(e.path, blobDirectory, cas.BlobAlgorithm.String()) + + if err := filepath.Walk(blobDir, func(path string, _ os.FileInfo, _ error) error { + // Skip the actual directory. + if path == blobDir { + return nil + } + + // XXX: Do we need to handle multiple-directory-deep cases? 
+ digest := digest.NewDigestFromHex(cas.BlobAlgorithm.String(), filepath.Base(path)) + digests = append(digests, digest) + return nil + }); err != nil { + return nil, errors.Wrap(err, "walk blobdir") + } + + return digests, nil +} + +// Clean executes a garbage collection of any non-blob garbage in the store +// (this includes temporary files and directories not reachable from the CAS +// interface). This MUST NOT remove any blobs or references in the store. +func (e *dirEngine) Clean(ctx context.Context) error { + // Remove every .umoci directory that isn't flocked. + matches, err := filepath.Glob(filepath.Join(e.path, ".umoci-*")) + if err != nil { + return errors.Wrap(err, "glob .umoci-*") + } + for _, path := range matches { + err = e.cleanPath(ctx, path) + if err != nil && err != filepath.SkipDir { + return err + } + } + + return nil +} + +func (e *dirEngine) cleanPath(ctx context.Context, path string) error { + cfh, err := os.Open(path) + if err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "open for locking") + } + defer cfh.Close() + + if err := unix.Flock(int(cfh.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil { + // If we fail to get a flock(2) then it's probably already locked, + // so we shouldn't touch it. + return filepath.SkipDir + } + defer unix.Flock(int(cfh.Fd()), unix.LOCK_UN) + + if err := os.RemoveAll(path); os.IsNotExist(err) { + return nil // somebody else beat us to it + } else if err != nil { + log.Warnf("failed to remove %s: %v", path, err) + return filepath.SkipDir + } + log.Debugf("cleaned %s", path) + + return nil +} + +// Close releases all references held by the e. Subsequent operations may +// fail. 
+func (e *dirEngine) Close() error { + if e.temp != "" { + if err := unix.Flock(int(e.tempFile.Fd()), unix.LOCK_UN); err != nil { + return errors.Wrap(err, "unlock tempdir") + } + if err := e.tempFile.Close(); err != nil { + return errors.Wrap(err, "close tempdir") + } + if err := os.RemoveAll(e.temp); err != nil { + return errors.Wrap(err, "remove tempdir") + } + } + return nil +} + +// Open opens a new reference to the directory-backed OCI image referenced by +// the provided path. +func Open(path string) (cas.Engine, error) { + engine := &dirEngine{ + path: path, + temp: "", + } + + if err := engine.validate(); err != nil { + return nil, errors.Wrap(err, "validate") + } + + return engine, nil +} + +// Create creates a new OCI image layout at the given path. If the path already +// exists, os.ErrExist is returned. However, all of the parent components of +// the path will be created if necessary. +func Create(path string) error { + // We need to fail if path already exists, but we first create all of the + // parent paths. + dir := filepath.Dir(path) + if dir != "." { + if err := os.MkdirAll(dir, 0755); err != nil { + return errors.Wrap(err, "mkdir parent") + } + } + if err := os.Mkdir(path, 0755); err != nil { + return errors.Wrap(err, "mkdir") + } + + // Create the necessary directories and "oci-layout" file. + if err := os.Mkdir(filepath.Join(path, blobDirectory), 0755); err != nil { + return errors.Wrap(err, "mkdir blobdir") + } + if err := os.Mkdir(filepath.Join(path, blobDirectory, cas.BlobAlgorithm.String()), 0755); err != nil { + return errors.Wrap(err, "mkdir algorithm") + } + + indexFh, err := os.Create(filepath.Join(path, indexFile)) + if err != nil { + return errors.Wrap(err, "create index.json") + } + defer indexFh.Close() + + defaultIndex := ispec.Index{ + Versioned: imeta.Versioned{ + SchemaVersion: 2, // FIXME: This is hardcoded at the moment. 
+ }, + } + if err := json.NewEncoder(indexFh).Encode(defaultIndex); err != nil { + return errors.Wrap(err, "encode index.json") + } + + layoutFh, err := os.Create(filepath.Join(path, layoutFile)) + if err != nil { + return errors.Wrap(err, "create oci-layout") + } + defer layoutFh.Close() + + ociLayout := ispec.ImageLayout{ + Version: ImageLayoutVersion, + } + if err := json.NewEncoder(layoutFh).Encode(ociLayout); err != nil { + return errors.Wrap(err, "encode oci-layout") + } + + // Everything is now set up. + return nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/casext/blob.go b/vendor/github.com/openSUSE/umoci/oci/casext/blob.go new file mode 100644 index 0000000000..4c91a61307 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/casext/blob.go @@ -0,0 +1,152 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package casext + +import ( + "encoding/json" + "io" + "io/ioutil" + + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Blob represents a "parsed" blob in an OCI image's blob store. MediaType +// offers a type-safe way of checking what the type of Data is. +type Blob struct { + // Descriptor is the {mediatype,digest,length} 3-tuple. Note that this + // isn't updated if the Data is modified. 
+ Descriptor ispec.Descriptor + + // Data is the "parsed" blob taken from the OCI image's blob store, and is + // typed according to the media type. The mapping from MIME => type is as + // follows. + // + // ispec.MediaTypeDescriptor => ispec.Descriptor + // ispec.MediaTypeImageManifest => ispec.Manifest + // ispec.MediaTypeImageIndex => ispec.Index + // ispec.MediaTypeImageLayer => io.ReadCloser + // ispec.MediaTypeImageLayerGzip => io.ReadCloser + // ispec.MediaTypeImageLayerNonDistributable => io.ReadCloser + // ispec.MediaTypeImageLayerNonDistributableGzip => io.ReadCloser + // ispec.MediaTypeImageConfig => ispec.Image + // unknown => io.ReadCloser + Data interface{} +} + +func (b *Blob) isParseable() bool { + return b.Descriptor.MediaType == ispec.MediaTypeDescriptor || + b.Descriptor.MediaType == ispec.MediaTypeImageManifest || + b.Descriptor.MediaType == ispec.MediaTypeImageIndex || + b.Descriptor.MediaType == ispec.MediaTypeImageConfig +} + +func (b *Blob) load(ctx context.Context, engine Engine) (Err error) { + reader, err := engine.GetVerifiedBlob(ctx, b.Descriptor) + if err != nil { + return errors.Wrap(err, "get blob") + } + + if b.isParseable() { + defer func() { + if _, err := io.Copy(ioutil.Discard, reader); Err == nil { + Err = errors.Wrapf(err, "discard trailing %q blob", b.Descriptor.MediaType) + } + if err := reader.Close(); Err == nil { + Err = errors.Wrapf(err, "close %q blob", b.Descriptor.MediaType) + } + }() + } + + switch b.Descriptor.MediaType { + // ispec.MediaTypeDescriptor => ispec.Descriptor + case ispec.MediaTypeDescriptor: + parsed := ispec.Descriptor{} + if err := json.NewDecoder(reader).Decode(&parsed); err != nil { + return errors.Wrap(err, "parse MediaTypeDescriptor") + } + b.Data = parsed + + // ispec.MediaTypeImageManifest => ispec.Manifest + case ispec.MediaTypeImageManifest: + parsed := ispec.Manifest{} + if err := json.NewDecoder(reader).Decode(&parsed); err != nil { + return errors.Wrap(err, "parse MediaTypeImageManifest") 
+ } + b.Data = parsed + + // ispec.MediaTypeImageIndex => ispec.Index + case ispec.MediaTypeImageIndex: + parsed := ispec.Index{} + if err := json.NewDecoder(reader).Decode(&parsed); err != nil { + return errors.Wrap(err, "parse MediaTypeImageIndex") + } + b.Data = parsed + + // ispec.MediaTypeImageConfig => ispec.Image + case ispec.MediaTypeImageConfig: + parsed := ispec.Image{} + if err := json.NewDecoder(reader).Decode(&parsed); err != nil { + return errors.Wrap(err, "parse MediaTypeImageConfig") + } + b.Data = parsed + + // unknown => io.ReadCloser() + default: + fallthrough + // ispec.MediaTypeImageLayer => io.ReadCloser + // ispec.MediaTypeImageLayerGzip => io.ReadCloser + // ispec.MediaTypeImageLayerNonDistributable => io.ReadCloser + // ispec.MediaTypeImageLayerNonDistributableGzip => io.ReadCloser + case ispec.MediaTypeImageLayer, ispec.MediaTypeImageLayerNonDistributable, + ispec.MediaTypeImageLayerGzip, ispec.MediaTypeImageLayerNonDistributableGzip: + // There isn't anything else we can practically do here. + b.Data = reader + } + + if b.Data == nil { + return errors.Errorf("[internal error] b.Data was nil after parsing") + } + return nil +} + +// Close cleans up all of the resources for the opened blob. +func (b *Blob) Close() error { + switch b.Descriptor.MediaType { + case ispec.MediaTypeImageLayer, ispec.MediaTypeImageLayerNonDistributable, + ispec.MediaTypeImageLayerGzip, ispec.MediaTypeImageLayerNonDistributableGzip: + if b.Data != nil { + return b.Data.(io.Closer).Close() + } + } + return nil +} + +// FromDescriptor parses the blob referenced by the given descriptor. 
+func (e Engine) FromDescriptor(ctx context.Context, descriptor ispec.Descriptor) (*Blob, error) { + blob := &Blob{ + Descriptor: descriptor, + Data: nil, + } + + if err := blob.load(ctx, e); err != nil { + return nil, errors.Wrap(err, "load") + } + return blob, nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/casext/casext.go b/vendor/github.com/openSUSE/umoci/oci/casext/casext.go new file mode 100644 index 0000000000..113ce8158d --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/casext/casext.go @@ -0,0 +1,38 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package casext provides extensions to the standard cas.Engine interface, +// allowing for generic functionality to be used on top of any implementation +// of cas.Engine. +package casext + +import "github.com/openSUSE/umoci/oci/cas" + +// TODO: Convert this to an interface and make Engine private. + +// Engine is a wrapper around cas.Engine that provides additional, generic +// extensions to the transport-dependent cas.Engine implementation. +type Engine struct { + cas.Engine +} + +// NewEngine returns a new Engine which acts as a wrapper around the given +// cas.Engine and provides additional, generic extensions to the +// transport-dependent cas.Engine implementation. 
+func NewEngine(engine cas.Engine) Engine { + return Engine{Engine: engine} +} diff --git a/vendor/github.com/openSUSE/umoci/oci/casext/gc.go b/vendor/github.com/openSUSE/umoci/oci/casext/gc.go new file mode 100644 index 0000000000..b8b00640cd --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/casext/gc.go @@ -0,0 +1,111 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package casext + +import ( + "github.com/apex/log" + "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// GC will perform a mark-and-sweep garbage collection of the OCI image +// referenced by the given CAS engine. The root set is taken to be the set of +// references stored in the image, and all blobs not reachable by following a +// descriptor path from the root set will be removed. +// +// GC will only call ListBlobs and ListReferences once, and assumes that there +// is no change in the set of references or blobs after calling those +// functions. In other words, it assumes it is the only user of the image that +// is making modifications. Things will not go well if this assumption is +// challenged. +func (e Engine) GC(ctx context.Context) error { + // Generate the root set of descriptors. 
+ var root []ispec.Descriptor + + names, err := e.ListReferences(ctx) + if err != nil { + return errors.Wrap(err, "get roots") + } + + for _, name := range names { + // TODO: This code is no longer necessary once we have index.json. + descriptorPaths, err := e.ResolveReference(ctx, name) + if err != nil { + return errors.Wrapf(err, "get root %s", name) + } + if len(descriptorPaths) == 0 { + return errors.Errorf("tag not found: %s", name) + } + if len(descriptorPaths) != 1 { + // TODO: Handle this more nicely. + return errors.Errorf("tag is ambiguous: %s", name) + } + descriptor := descriptorPaths[0].Descriptor() + log.WithFields(log.Fields{ + "name": name, + "digest": descriptor.Digest, + }).Debugf("GC: got reference") + root = append(root, descriptor) + } + + // Mark from the root sets. + black := map[digest.Digest]struct{}{} + for idx, descriptor := range root { + log.WithFields(log.Fields{ + "digest": descriptor.Digest, + }).Debugf("GC: marking from root") + + reachables, err := e.Reachable(ctx, descriptor) + if err != nil { + return errors.Wrapf(err, "getting reachables from root %d", idx) + } + for _, reachable := range reachables { + black[reachable] = struct{}{} + } + } + + // Sweep all blobs in the white set. + blobs, err := e.ListBlobs(ctx) + if err != nil { + return errors.Wrap(err, "get blob list") + } + + n := 0 + for _, digest := range blobs { + if _, ok := black[digest]; ok { + // Digest is in the black set. + continue + } + log.Infof("garbage collecting blob: %s", digest) + + if err := e.DeleteBlob(ctx, digest); err != nil { + return errors.Wrapf(err, "remove unmarked blob %s", digest) + } + n++ + } + + // Finally, tell CAS to GC it. 
+ if err := e.Clean(ctx); err != nil { + return errors.Wrapf(err, "clean engine") + } + + log.Debugf("garbage collected %d blobs", n) + return nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/casext/json.go b/vendor/github.com/openSUSE/umoci/oci/casext/json.go new file mode 100644 index 0000000000..7c7d09f165 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/casext/json.go @@ -0,0 +1,45 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package casext + +import ( + "bytes" + "encoding/json" + + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PutBlobJSON adds a new JSON blob to the image (marshalled from the given +// interface). This is equivalent to calling PutBlob() with a JSON payload +// as the reader. Note that due to intricacies in the Go JSON +// implementation, we cannot guarantee that two calls to PutBlobJSON() will +// return the same digest. +// +// TODO: Use a proper JSON serialisation library, which actually guarantees +// consistent output. Go's JSON library doesn't even attempt to sort +// map[...]... objects (which have their iteration order randomised in +// Go). 
+func (e Engine) PutBlobJSON(ctx context.Context, data interface{}) (digest.Digest, int64, error) { + var buffer bytes.Buffer + if err := json.NewEncoder(&buffer).Encode(data); err != nil { + return "", -1, errors.Wrap(err, "encode JSON") + } + return e.PutBlob(ctx, &buffer) +} diff --git a/vendor/github.com/openSUSE/umoci/oci/casext/map.go b/vendor/github.com/openSUSE/umoci/oci/casext/map.go new file mode 100644 index 0000000000..ff983af9c7 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/casext/map.go @@ -0,0 +1,127 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package casext + +import ( + "reflect" + + "github.com/apex/log" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Used by walkState.mark() to determine which struct members are descriptors to +// recurse into them. We aren't interested in struct members which are not +// either a slice of ispec.Descriptor or ispec.Descriptor themselves. +var descriptorType = reflect.TypeOf(ispec.Descriptor{}) + +// DescriptorMapFunc is a function that is used to provide a mapping between +// different descriptor values with MapDescriptors. It will not be called +// concurrently, and will only be called once for each recursively resolved +// element. 
+type DescriptorMapFunc func(ispec.Descriptor) ispec.Descriptor + +// isDescriptor returns whether the given T is a ispec.Descriptor. +func isDescriptor(T reflect.Type) bool { + return T == descriptorType +} + +func mapDescriptors(V reflect.Value, mapFunc DescriptorMapFunc) error { + // We can ignore this value. + if !V.IsValid() { + return nil + } + + // First check that V isn't actually a ispec.Descriptor, if it is then + // we're done. + if isDescriptor(V.Type()) { + old := V.Interface().(ispec.Descriptor) + new := mapFunc(old) + + // We only need to do any assignment if the two are not equal. + if !reflect.DeepEqual(new, old) { + // P is a ptr to V (or just V if it's already a pointer). + P := V + if !P.CanSet() { + // This is a programmer error. + return errors.Errorf("[internal error] cannot apply map function to %v: %v is not settable!", P, P.Type()) + } + P.Set(reflect.ValueOf(new)) + } + return nil + } + + // Recurse into all the types. + switch V.Kind() { + case reflect.Ptr, reflect.Interface: + // Just deref the pointer/interface. + if V.IsNil() { + return nil + } + err := mapDescriptors(V.Elem(), mapFunc) + return errors.Wrapf(err, "%v", V.Type()) + + case reflect.Slice, reflect.Array: + // Iterate over each element. + for idx := 0; idx < V.Len(); idx++ { + err := mapDescriptors(V.Index(idx), mapFunc) + if err != nil { + return errors.Wrapf(err, "%v[%d]->%v", V.Type(), idx, V.Index(idx).Type()) + } + } + return nil + + case reflect.Struct: + // We are only ever going to be interested in ispec.* types. + // XXX: This is something we might want to revisit in the future. + if V.Type().PkgPath() != descriptorType.PkgPath() { + log.WithFields(log.Fields{ + "name": V.Type().PkgPath() + "::" + V.Type().Name(), + "v1path": descriptorType.PkgPath(), + }).Debugf("detected escape to outside ispec.* namespace") + return nil + } + + // We can now actually iterate through a struct to find all descriptors. 
+ for idx := 0; idx < V.NumField(); idx++ { + err := mapDescriptors(V.Field(idx), mapFunc) + if err != nil { + return errors.Wrapf(err, "%v[%d=%s]->%v", V.Type(), idx, V.Type().Field(idx).Name, V.Field(idx).Type()) + } + } + return nil + + default: + // FIXME: Should we log something here? While this will be hit normally + // (namely when we hit an io.ReadCloser) this seems a bit + // careless. + return nil + } + + // Unreachable. +} + +// MapDescriptors applies the given function once for every instance of +// ispec.Descriptor found in the given type, and replaces it with the returned +// value (which may be the same). This is done through the reflection API in +// Go, which means that hidden attributes may be inaccessible. +// DescriptorMapFunc will only be executed once for every ispec.Descriptor +// found. +func MapDescriptors(i interface{}, mapFunc DescriptorMapFunc) error { + return mapDescriptors(reflect.ValueOf(i), mapFunc) +} diff --git a/vendor/github.com/openSUSE/umoci/oci/casext/refname.go b/vendor/github.com/openSUSE/umoci/oci/casext/refname.go new file mode 100644 index 0000000000..a57ddcc8e6 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/casext/refname.go @@ -0,0 +1,229 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package casext + +import ( + "regexp" + + "github.com/apex/log" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// isKnownMediaType returns whether a media type is known by the spec. This +// probably should be moved somewhere else to avoid going out of date. +func isKnownMediaType(mediaType string) bool { + return mediaType == ispec.MediaTypeDescriptor || + mediaType == ispec.MediaTypeImageManifest || + mediaType == ispec.MediaTypeImageIndex || + mediaType == ispec.MediaTypeImageLayer || + mediaType == ispec.MediaTypeImageLayerGzip || + mediaType == ispec.MediaTypeImageLayerNonDistributable || + mediaType == ispec.MediaTypeImageLayerNonDistributableGzip || + mediaType == ispec.MediaTypeImageConfig +} + +// refnameRegex is a regex that only matches reference names that are valid +// according to the OCI specification. See IsValidReferenceName for the EBNF. +var refnameRegex = regexp.MustCompile(`^([A-Za-z0-9]+(([-._:@+]|--)[A-Za-z0-9]+)*)(/([A-Za-z0-9]+(([-._:@+]|--)[A-Za-z0-9]+)*))*$`) + +// IsValidReferenceName returns whether the provided annotation value for +// "org.opencontainers.image.ref.name" is actually valid according to the +// OCI specification. This only matches against the MUST requirement, not the +// SHOULD requirement. The EBNF defined in the specification is: +// +// refname ::= component ("/" component)* +// component ::= alphanum (separator alphanum)* +// alphanum ::= [A-Za-z0-9]+ +// separator ::= [-._:@+] | "--" +func IsValidReferenceName(refname string) bool { + return refnameRegex.MatchString(refname) +} + +// ResolveReference will attempt to resolve all possible descriptor paths to +// Manifests (or any unknown blobs) that match a particular reference name (if +// descriptors are stored in non-standard blobs, Resolve will be unable to find +// them but will return the top-most unknown descriptor). 
+// ResolveReference assumes that "reference name" refers to the value of the
+// "org.opencontainers.image.ref.name" descriptor annotation. It is recommended
+// that if the length of the returned slice of descriptors is greater than one
+// the user be consulted to resolve the conflict (due to ambiguity in
+// resolution paths).
+//
+// TODO: How are we meant to implement other restrictions such as the
+//       architecture and feature flags? The API will need to change.
+func (e Engine) ResolveReference(ctx context.Context, refname string) ([]DescriptorPath, error) {
+	// XXX: It should be possible to override this somehow, in case we are
+	//      dealing with an image that abuses the image specification in some
+	//      way.
+	if !IsValidReferenceName(refname) {
+		return nil, errors.Errorf("refusing to resolve invalid reference %q", refname)
+	}
+
+	index, err := e.GetIndex(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "get top-level index")
+	}
+
+	// Set of root links that match the given refname.
+	var roots []ispec.Descriptor
+
+	// We only consider the case where AnnotationRefName is defined on the
+	// top-level of the index tree. While this isn't codified in the spec (at
+	// the time of writing -- 1.0.0-rc5) there are some discussions to add this
+	// restriction in 1.0.0-rc6.
+	for _, descriptor := range index.Manifests {
+		// XXX: What should we do if refname == "".
+		if descriptor.Annotations[ispec.AnnotationRefName] == refname {
+			roots = append(roots, descriptor)
+		}
+	}
+
+	// The resolved set of descriptors.
+	var resolutions []DescriptorPath
+	for _, root := range roots {
+		// Find all manifests or other blobs that are reachable from the given
+		// descriptor.
+		if err := e.Walk(ctx, root, func(descriptorPath DescriptorPath) error {
+			descriptor := descriptorPath.Descriptor()
+
+			// It is very important that we do not ignore unknown media types
+			// here. We only recurse into mediaTypes that are *known* and are
+			// also not ispec.MediaTypeImageManifest.
+			if isKnownMediaType(descriptor.MediaType) && descriptor.MediaType != ispec.MediaTypeImageManifest {
+				return nil
+			}
+
+			// Add the resolution and do not recurse any deeper.
+			resolutions = append(resolutions, descriptorPath)
+			return ErrSkipDescriptor
+		}); err != nil {
+			return nil, errors.Wrapf(err, "walk %s", root.Digest)
+		}
+	}
+
+	log.WithFields(log.Fields{
+		"refs": resolutions,
+	}).Debugf("casext.ResolveReference(%s) got these descriptors", refname)
+	return resolutions, nil
+}
+
+// XXX: Should the *Reference set of interfaces support DescriptorPath? While
+//      it might seem like it doesn't make sense, a DescriptorPath entirely
+//      removes ambiguity with regards to which root needs to be operated on.
+//      If a user has that information we should provide them a way to use it.
+
+// UpdateReference replaces an existing entry for refname with the given
+// descriptor. If there are multiple descriptors that match the refname they
+// are all replaced with the given descriptor.
+func (e Engine) UpdateReference(ctx context.Context, refname string, descriptor ispec.Descriptor) error {
+	// XXX: It should be possible to override this somehow, in case we are
+	//      dealing with an image that abuses the image specification in some
+	//      way.
+	if !IsValidReferenceName(refname) {
+		return errors.Errorf("refusing to update invalid reference %q", refname)
+	}
+
+	// Get index to modify.
+	index, err := e.GetIndex(ctx)
+	if err != nil {
+		return errors.Wrap(err, "get top-level index")
+	}
+
+	// TODO: Handle refname = "".
+	var newIndex []ispec.Descriptor
+	for _, descriptor := range index.Manifests {
+		if descriptor.Annotations[ispec.AnnotationRefName] != refname {
+			newIndex = append(newIndex, descriptor)
+		}
+	}
+	// newIndex is a filtered subset of index.Manifests, so the number of
+	// removed entries is len(index.Manifests)-len(newIndex). (The original
+	// check had the operands inverted, so it could never be positive and the
+	// warning below could never fire.)
+	if len(index.Manifests)-len(newIndex) > 1 {
+		// Warn users if the operation is going to remove more than one reference.
+		log.Warn("multiple references match the given reference name -- all of them have been replaced due to this ambiguity")
+	}
+
+	// Append the descriptor.
+	if descriptor.Annotations == nil {
+		descriptor.Annotations = map[string]string{}
+	}
+	descriptor.Annotations[ispec.AnnotationRefName] = refname
+	newIndex = append(newIndex, descriptor)
+
+	// Commit to image.
+	index.Manifests = newIndex
+	if err := e.PutIndex(ctx, index); err != nil {
+		return errors.Wrap(err, "replace index")
+	}
+	return nil
+}
+
+// DeleteReference removes all entries in the index that match the given
+// refname.
+func (e Engine) DeleteReference(ctx context.Context, refname string) error {
+	// XXX: It should be possible to override this somehow, in case we are
+	//      dealing with an image that abuses the image specification in some
+	//      way.
+	if !IsValidReferenceName(refname) {
+		return errors.Errorf("refusing to delete invalid reference %q", refname)
+	}
+
+	// Get index to modify.
+	index, err := e.GetIndex(ctx)
+	if err != nil {
+		return errors.Wrap(err, "get top-level index")
+	}
+
+	// TODO: Handle refname = "".
+	var newIndex []ispec.Descriptor
+	for _, descriptor := range index.Manifests {
+		if descriptor.Annotations[ispec.AnnotationRefName] != refname {
+			newIndex = append(newIndex, descriptor)
+		}
+	}
+	// Same inverted-subtraction fix as in UpdateReference above: count the
+	// entries actually removed by the filter.
+	if len(index.Manifests)-len(newIndex) > 1 {
+		// Warn users if the operation is going to remove more than one reference.
+		log.Warn("multiple references match the given reference name -- all of them have been deleted due to this ambiguity")
+	}
+
+	// Commit to image.
+	index.Manifests = newIndex
+	if err := e.PutIndex(ctx, index); err != nil {
+		return errors.Wrap(err, "replace index")
+	}
+	return nil
+}
+
+// ListReferences returns all of the ref.name entries that are specified in the
+// top-level index. Note that the list may contain duplicates, due to the
+// nature of references in the image-spec.
+func (e Engine) ListReferences(ctx context.Context) ([]string, error) { + // Get index. + index, err := e.GetIndex(ctx) + if err != nil { + return nil, errors.Wrap(err, "get top-level index") + } + + var refs []string + for _, descriptor := range index.Manifests { + ref, ok := descriptor.Annotations[ispec.AnnotationRefName] + if ok { + refs = append(refs, ref) + } + } + return refs, nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/casext/verified_blob.go b/vendor/github.com/openSUSE/umoci/oci/casext/verified_blob.go new file mode 100644 index 0000000000..622e03f65b --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/casext/verified_blob.go @@ -0,0 +1,40 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package casext + +import ( + "context" + "io" + + "github.com/openSUSE/umoci/pkg/hardening" + ispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// GetVerifiedBlob returns a VerifiedReadCloser for retrieving a blob from the +// image, which the caller must Close() *and* read-to-EOF (checking the error +// code of both). Returns ErrNotExist if the digest is not found, and +// ErrBlobDigestMismatch on a mismatched blob digest. In addition, the reader +// is limited to the descriptor.Size. 
+func (e Engine) GetVerifiedBlob(ctx context.Context, descriptor ispec.Descriptor) (io.ReadCloser, error) { + reader, err := e.GetBlob(ctx, descriptor.Digest) + return &hardening.VerifiedReadCloser{ + Reader: reader, + ExpectedDigest: descriptor.Digest, + ExpectedSize: descriptor.Size, + }, err +} diff --git a/vendor/github.com/openSUSE/umoci/oci/casext/walk.go b/vendor/github.com/openSUSE/umoci/oci/casext/walk.go new file mode 100644 index 0000000000..4ab941a45f --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/casext/walk.go @@ -0,0 +1,194 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package casext + +import ( + "errors" + + "github.com/apex/log" + "github.com/openSUSE/umoci/oci/cas" + "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/net/context" +) + +// childDescriptors is a wrapper around MapDescriptors which just creates a +// slice of all of the arguments, and doesn't modify them. +func childDescriptors(i interface{}) []ispec.Descriptor { + var children []ispec.Descriptor + if err := MapDescriptors(i, func(descriptor ispec.Descriptor) ispec.Descriptor { + children = append(children, descriptor) + return descriptor + }); err != nil { + // If we got an error, this is a bug in MapDescriptors proper. 
+ log.Fatalf("[internal error] MapDescriptors returned an error inside childDescriptors: %+v", err) + } + return children +} + +// walkState stores state information about the recursion into a given +// descriptor tree. +type walkState struct { + // engine is the CAS engine we are operating on. + engine Engine + + // walkFunc is the WalkFunc provided by the user. + walkFunc WalkFunc +} + +// DescriptorPath is used to describe the path of descriptors (from a top-level +// index) that were traversed when resolving a particular reference name. The +// purpose of this is to allow libraries like github.com/openSUSE/umoci/mutate +// to handle generic manifest updates given an arbitrary descriptor walk. Users +// of ResolveReference that don't care about the descriptor path can just use +// .Descriptor. +type DescriptorPath struct { + // Walk is the set of descriptors walked to reach Descriptor (inclusive). + // The order is the same as the order of the walk, with the target being + // the last entry and the entrypoint from index.json being the first. + Walk []ispec.Descriptor `json:"descriptor_walk"` +} + +// Root returns the first step in the DescriptorPath, which is the point where +// the walk started. This is just shorthand for DescriptorPath.Walk[0]. Root +// will *panic* if DescriptorPath is invalid. +func (d DescriptorPath) Root() ispec.Descriptor { + if len(d.Walk) < 1 { + panic("empty DescriptorPath") + } + return d.Walk[0] +} + +// Descriptor returns the final step in the DescriptorPath, which is the target +// descriptor being referenced by DescriptorPath. This is just shorthand for +// accessing the last entry of DescriptorPath.Walk. Descriptor will *panic* if +// DescriptorPath is invalid. 
+func (d DescriptorPath) Descriptor() ispec.Descriptor { + if len(d.Walk) < 1 { + panic("empty DescriptorPath") + } + return d.Walk[len(d.Walk)-1] +} + +// ErrSkipDescriptor is a special error returned by WalkFunc which will cause +// Walk to not recurse into the descriptor currently being evaluated by +// WalkFunc. This interface is roughly equivalent to filepath.SkipDir. +var ErrSkipDescriptor = errors.New("[internal] do not recurse into descriptor") + +// WalkFunc is the type of function passed to Walk. It will be a called on each +// descriptor encountered, recursively -- which may involve the function being +// called on the same descriptor multiple times (though because an OCI image is +// a Merkle tree there will never be any loops). If an error is returned by +// WalkFunc, the recursion will halt and the error will bubble up to the +// caller. +// +// TODO: Also provide Blob to WalkFunc so that callers don't need to load blobs +// more than once. This is quite important for remote CAS implementations. +type WalkFunc func(descriptorPath DescriptorPath) error + +func (ws *walkState) recurse(ctx context.Context, descriptorPath DescriptorPath) error { + log.WithFields(log.Fields{ + "digest": descriptorPath.Descriptor().Digest, + }).Debugf("-> ws.recurse") + defer log.WithFields(log.Fields{ + "digest": descriptorPath.Descriptor().Digest, + }).Debugf("<- ws.recurse") + + // Run walkFunc. + if err := ws.walkFunc(descriptorPath); err != nil { + if err == ErrSkipDescriptor { + return nil + } + return err + } + + // Get blob to recurse into. + descriptor := descriptorPath.Descriptor() + blob, err := ws.engine.FromDescriptor(ctx, descriptor) + if err != nil { + // Ignore cases where the descriptor points to an object we don't know + // how to parse. 
+		if err == cas.ErrUnknownType {
+			log.Infof("skipping walk into unknown media-type %v of blob %v", descriptor.MediaType, descriptor.Digest)
+			return nil
+		}
+		return err
+	}
+	defer blob.Close()
+
+	// Recurse into children.
+	for _, child := range childDescriptors(blob.Data) {
+		if err := ws.recurse(ctx, DescriptorPath{
+			Walk: append(descriptorPath.Walk, child),
+		}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Walk performs a depth-first walk from a given root descriptor, using the
+// provided CAS engine to fetch all other necessary descriptors. If an error is
+// returned by the provided WalkFunc, walking is terminated and the error is
+// returned to the caller.
+func (e Engine) Walk(ctx context.Context, root ispec.Descriptor, walkFunc WalkFunc) error {
+	ws := &walkState{
+		engine:   e,
+		walkFunc: walkFunc,
+	}
+	return ws.recurse(ctx, DescriptorPath{
+		Walk: []ispec.Descriptor{root},
+	})
+}
+
+// Paths returns the set of descriptor paths that can be traversed from the
+// provided root descriptor. It is effectively shorthand for Walk(). Note that
+// there may be repeated descriptors in the returned slice, due to different
+// blobs containing the same (or a similar) descriptor. However, the
+// DescriptorPaths should be unique.
+func (e Engine) Paths(ctx context.Context, root ispec.Descriptor) ([]DescriptorPath, error) {
+	var reachable []DescriptorPath
+	err := e.Walk(ctx, root, func(descriptorPath DescriptorPath) error {
+		reachable = append(reachable, descriptorPath)
+		return nil
+	})
+	return reachable, err
+}
+
+// Reachable returns the set of digests which can be reached using a descriptor
+// path from the provided root descriptor. It is effectively a shorthand for
+// Walk(). The returned slice will *not* contain any duplicate digest.Digest
+// entries. Note that without descriptors, a digest is not particularly
+// meaningful (OCI blobs are not self-descriptive).
+func (e Engine) Reachable(ctx context.Context, root ispec.Descriptor) ([]digest.Digest, error) { + seen := map[digest.Digest]struct{}{} + + if err := e.Walk(ctx, root, func(descriptorPath DescriptorPath) error { + seen[descriptorPath.Descriptor().Digest] = struct{}{} + return nil + }); err != nil { + return nil, err + } + + var reachable []digest.Digest + for node := range seen { + reachable = append(reachable, node) + } + return reachable, nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/config/convert/README.md b/vendor/github.com/openSUSE/umoci/oci/config/convert/README.md new file mode 100644 index 0000000000..4fb7076c44 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/config/convert/README.md @@ -0,0 +1,11 @@ +### `umoci/oci/config/convert` ### + +One fairly important aspect of creating a runtime bundle is the configuration +of the container. While an image configuration and runtime configuration are +defined on different levels (images are far more platform agnostic than runtime +bundles), conversion from an image to a runtime configuration is defined as +part of the OCI specification (thanks to this reference implementation). + +This package implements a fairly unopinionated implementation of that +conversion, allowing consumers to easily add their own extensions in the +runtime configuration generation. diff --git a/vendor/github.com/openSUSE/umoci/oci/config/convert/runtime.go b/vendor/github.com/openSUSE/umoci/oci/config/convert/runtime.go new file mode 100644 index 0000000000..88b8a3b6dc --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/config/convert/runtime.go @@ -0,0 +1,176 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package convert + +import ( + "path/filepath" + "runtime" + "strings" + + "github.com/apex/log" + igen "github.com/openSUSE/umoci/oci/config/generate" + "github.com/openSUSE/umoci/third_party/user" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + rspec "github.com/opencontainers/runtime-spec/specs-go" + rgen "github.com/opencontainers/runtime-tools/generate" + "github.com/pkg/errors" +) + +// Annotations described by the OCI image-spec document (these represent fields +// in an image configuration that do not have a native representation in the +// runtime-spec). +const ( + authorAnnotation = "org.opencontainers.image.author" + createdAnnotation = "org.opencontainers.image.created" + stopSignalAnnotation = "org.opencontainers.image.stopSignal" + exposedPortsAnnotation = "org.opencontainers.image.exposedPorts" +) + +// ToRuntimeSpec converts the given OCI image configuration to a runtime +// configuration appropriate for use, which is templated on the default +// configuration specified by the OCI runtime-tools. It is equivalent to +// MutateRuntimeSpec("runtime-tools/generate".New(), image).Spec(). +func ToRuntimeSpec(rootfs string, image ispec.Image) (rspec.Spec, error) { + g, err := rgen.New(runtime.GOOS) + if err != nil { + return rspec.Spec{}, err + } + if err := MutateRuntimeSpec(g, rootfs, image); err != nil { + return rspec.Spec{}, err + } + return *g.Spec(), nil +} + +// parseEnv splits a given environment variable (of the form name=value) into +// (name, value). 
An error is returned if there is no "=" in the line or if the +// name is empty. +func parseEnv(env string) (string, string, error) { + parts := strings.SplitN(env, "=", 2) + if len(parts) != 2 { + return "", "", errors.Errorf("environment variable must contain '=': %s", env) + } + + name, value := parts[0], parts[1] + if name == "" { + return "", "", errors.Errorf("environment variable must have non-empty name: %s", env) + } + return name, value, nil +} + +// MutateRuntimeSpec mutates a given runtime specification generator with the +// image configuration provided. It returns the original generator, and does +// not modify any fields directly (to allow for chaining). +func MutateRuntimeSpec(g rgen.Generator, rootfs string, image ispec.Image) error { + ig, err := igen.NewFromImage(image) + if err != nil { + return errors.Wrap(err, "creating image generator") + } + + if ig.OS() != "linux" { + return errors.Errorf("unsupported OS: %s", image.OS) + } + + // FIXME: We need to figure out if we're modifying an incompatible runtime spec. + //g.SetVersion(rspec.Version) + // TODO: We stopped including the OS and Architecture information in the runtime-spec. + // Make sure we fix that once https://github.com/opencontainers/image-spec/pull/711 + // is resolved. + + // Set verbatim fields + g.SetProcessTerminal(true) + g.SetRootPath(filepath.Base(rootfs)) + g.SetRootReadonly(false) + + g.SetProcessCwd("/") + if ig.ConfigWorkingDir() != "" { + g.SetProcessCwd(ig.ConfigWorkingDir()) + } + + for _, env := range ig.ConfigEnv() { + name, value, err := parseEnv(env) + if err != nil { + return errors.Wrap(err, "parsing image.Config.Env") + } + g.AddProcessEnv(name, value) + } + + args := []string{} + args = append(args, ig.ConfigEntrypoint()...) + args = append(args, ig.ConfigCmd()...) 
+ if len(args) > 0 { + g.SetProcessArgs(args) + } + + // Set annotations fields + for key, value := range ig.ConfigLabels() { + g.AddAnnotation(key, value) + } + g.AddAnnotation(authorAnnotation, ig.Author()) + g.AddAnnotation(createdAnnotation, ig.Created().Format(igen.ISO8601)) + g.AddAnnotation(stopSignalAnnotation, image.Config.StopSignal) + + // Set parsed fields + // Get the *actual* uid and gid of the user. If the image doesn't contain + // an /etc/passwd or /etc/group file then GetExecUserPath will just do a + // numerical parsing. + var passwdPath, groupPath string + if rootfs != "" { + passwdPath = filepath.Join(rootfs, "/etc/passwd") + groupPath = filepath.Join(rootfs, "/etc/group") + } + execUser, err := user.GetExecUserPath(ig.ConfigUser(), nil, passwdPath, groupPath) + if err != nil { + // We only log an error if were not given a rootfs, and we set execUser + // to the "default" (root:root). + if rootfs != "" { + return errors.Wrapf(err, "cannot parse user spec: '%s'", ig.ConfigUser()) + } + log.Warnf("could not parse user spec '%s' without a rootfs -- defaulting to root:root", ig.ConfigUser()) + execUser = new(user.ExecUser) + } + + g.SetProcessUID(uint32(execUser.Uid)) + g.SetProcessGID(uint32(execUser.Gid)) + g.ClearProcessAdditionalGids() + + for _, gid := range execUser.Sgids { + g.AddProcessAdditionalGid(uint32(gid)) + } + if execUser.Home != "" { + g.AddProcessEnv("HOME", execUser.Home) + } + + // Set optional fields + ports := ig.ConfigExposedPortsArray() + g.AddAnnotation(exposedPortsAnnotation, strings.Join(ports, ",")) + + for vol := range ig.ConfigVolumes() { + // XXX: This is _fine_ but might cause some issues in the future. + g.AddMount(rspec.Mount{ + Destination: vol, + Type: "tmpfs", + Source: "none", + Options: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + }) + } + + // Remove all seccomp rules. 
+ g.Spec().Linux.Seccomp = nil + + return nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/config/generate/README.md b/vendor/github.com/openSUSE/umoci/oci/config/generate/README.md new file mode 100644 index 0000000000..ca3ac7340b --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/config/generate/README.md @@ -0,0 +1,11 @@ +### `umoci/oci/config/generate` ### + +This intends to be a library like `runtime-tools/generate` which allows you to +generate modifications to an OCI image configuration blob (of type +[`application/vnd.oci.image.config.v1+json`][oci-image-config]). It's a bit of +a shame that this is necessary, but it shouldn't be *that bad* to implement + +The hope is that this library (or some form of it) will become an upstream +library so I don't have to maintain this for any extended period of time. + +[oci-image-config]: https://github.com/opencontainers/image-spec/blob/master/config.md diff --git a/vendor/github.com/openSUSE/umoci/oci/config/generate/save.go b/vendor/github.com/openSUSE/umoci/oci/config/generate/save.go new file mode 100644 index 0000000000..69499cfb06 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/config/generate/save.go @@ -0,0 +1,55 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package generate + +import ( + "encoding/json" + "io" + + "github.com/pkg/errors" +) + +// fakeBuffer implements the io.Writer interface but just counts the number of +// bytes "written" to it. +type fakeBuffer struct { + n int64 +} + +// Write just counts the number of bytes requested to be written. +func (fb *fakeBuffer) Write(p []byte) (int, error) { + size := len(p) + fb.n += int64(size) + return size, nil +} + +// WriteTo outputs a JSON-marshalled version of the current state of the +// generator. It is not guaranteed that the generator will produce the same +// output given the same state, so it's recommended to only call this function +// once. The JSON is not pretty-printed. +func (g *Generator) WriteTo(w io.Writer) (n int64, err error) { + // We need to return the number of bytes written, which json.NewEncoder + // won't give us. So we have to cheat a little to get the answer. + var fb fakeBuffer + w = io.MultiWriter(w, &fb) + + if err := json.NewEncoder(w).Encode(g.image); err != nil { + return fb.n, errors.Wrap(err, "encode image") + } + + return fb.n, nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/config/generate/spec.go b/vendor/github.com/openSUSE/umoci/oci/config/generate/spec.go new file mode 100644 index 0000000000..2f2025c3d1 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/config/generate/spec.go @@ -0,0 +1,383 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package generate + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// FIXME: Because we are not a part of upstream, we have to add some tests that +// ensure that this set of getters and setters is complete. This should +// be possible through some reflection. + +// Generator allows you to generate a mutable OCI image-spec configuration +// which can be written to a file (and its digest computed). It is the +// recommended way of handling modification and generation of image-spec +// configuration blobs. +type Generator struct { + image ispec.Image +} + +// init makes sure everything has a "proper" zero value. +func (g *Generator) init() { + if g.image.Config.ExposedPorts == nil { + g.ClearConfigExposedPorts() + } + if g.image.Config.Env == nil { + g.ClearConfigEnv() + } + if g.image.Config.Entrypoint == nil { + g.ClearConfigEntrypoint() + } + if g.image.Config.Cmd == nil { + g.ClearConfigCmd() + } + if g.image.Config.Volumes == nil { + g.ClearConfigVolumes() + } + if g.image.Config.Labels == nil { + g.ClearConfigLabels() + } + if g.image.RootFS.DiffIDs == nil { + g.ClearRootfsDiffIDs() + } + if g.image.History == nil { + g.ClearHistory() + } +} + +// New creates a new Generator with the inital template set to a default. It is +// not recommended to leave any of the options as their default values (they +// may change in the future without warning and may be invalid images). +func New() *Generator { + // FIXME: Come up with some sane default. + g := &Generator{ + image: ispec.Image{}, + } + g.init() + return g +} + +// NewFromImage generates a new generator with the initial template being the +// given ispec.Image. 
+func NewFromImage(image ispec.Image) (*Generator, error) { + g := &Generator{ + image: image, + } + + g.init() + return g, nil +} + +// Image returns a copy of the current state of the generated image. +func (g *Generator) Image() ispec.Image { + return g.image +} + +// SetConfigUser sets the username or UID which the process in the container should run as. +func (g *Generator) SetConfigUser(user string) { + g.image.Config.User = user +} + +// ConfigUser returns the username or UID which the process in the container should run as. +func (g *Generator) ConfigUser() string { + return g.image.Config.User +} + +// ClearConfigExposedPorts clears the set of ports to expose from a container running this image. +func (g *Generator) ClearConfigExposedPorts() { + g.image.Config.ExposedPorts = map[string]struct{}{} +} + +// AddConfigExposedPort adds a port the set of ports to expose from a container running this image. +func (g *Generator) AddConfigExposedPort(port string) { + g.image.Config.ExposedPorts[port] = struct{}{} +} + +// RemoveConfigExposedPort removes a port the set of ports to expose from a container running this image. +func (g *Generator) RemoveConfigExposedPort(port string) { + delete(g.image.Config.ExposedPorts, port) +} + +// ConfigExposedPorts returns the set of ports to expose from a container running this image. +func (g *Generator) ConfigExposedPorts() map[string]struct{} { + // We have to make a copy to preserve the privacy of g.image.Config. + copy := map[string]struct{}{} + for k, v := range g.image.Config.ExposedPorts { + copy[k] = v + } + return copy +} + +// ConfigExposedPortsArray returns a sorted array of ports to expose from a container running this image. +func (g *Generator) ConfigExposedPortsArray() []string { + var ports []string + for port := range g.image.Config.ExposedPorts { + ports = append(ports, port) + } + sort.Strings(ports) + return ports +} + +// ClearConfigEnv clears the list of environment variables to be used in a container. 
+func (g *Generator) ClearConfigEnv() { + g.image.Config.Env = []string{} +} + +// AddConfigEnv appends to the list of environment variables to be used in a container. +func (g *Generator) AddConfigEnv(name, value string) { + // If the key already exists in the environment set, we replace it. + // This ensures we don't run into POSIX undefined territory. + env := fmt.Sprintf("%s=%s", name, value) + for idx := range g.image.Config.Env { + if strings.HasPrefix(g.image.Config.Env[idx], name+"=") { + g.image.Config.Env[idx] = env + return + } + } + g.image.Config.Env = append(g.image.Config.Env, env) +} + +// ConfigEnv returns the list of environment variables to be used in a container. +func (g *Generator) ConfigEnv() []string { + copy := []string{} + for _, v := range g.image.Config.Env { + copy = append(copy, v) + } + return copy +} + +// ClearConfigEntrypoint clears the list of arguments to use as the command to execute when the container starts. +func (g *Generator) ClearConfigEntrypoint() { + g.image.Config.Entrypoint = []string{} +} + +// SetConfigEntrypoint sets the list of arguments to use as the command to execute when the container starts. +func (g *Generator) SetConfigEntrypoint(entrypoint []string) { + copy := []string{} + for _, v := range entrypoint { + copy = append(copy, v) + } + g.image.Config.Entrypoint = copy +} + +// ConfigEntrypoint returns the list of arguments to use as the command to execute when the container starts. +func (g *Generator) ConfigEntrypoint() []string { + // We have to make a copy to preserve the privacy of g.image.Config. + copy := []string{} + for _, v := range g.image.Config.Entrypoint { + copy = append(copy, v) + } + return copy +} + +// ClearConfigCmd clears the list of default arguments to the entrypoint of the container. +func (g *Generator) ClearConfigCmd() { + g.image.Config.Cmd = []string{} +} + +// SetConfigCmd sets the list of default arguments to the entrypoint of the container. 
+func (g *Generator) SetConfigCmd(cmd []string) { + copy := []string{} + for _, v := range cmd { + copy = append(copy, v) + } + g.image.Config.Cmd = copy +} + +// ConfigCmd returns the list of default arguments to the entrypoint of the container. +func (g *Generator) ConfigCmd() []string { + // We have to make a copy to preserve the privacy of g.image.Config. + copy := []string{} + for _, v := range g.image.Config.Cmd { + copy = append(copy, v) + } + return copy +} + +// ClearConfigVolumes clears the set of directories which should be created as data volumes in a container running this image. +func (g *Generator) ClearConfigVolumes() { + g.image.Config.Volumes = map[string]struct{}{} +} + +// AddConfigVolume adds a volume to the set of directories which should be created as data volumes in a container running this image. +func (g *Generator) AddConfigVolume(volume string) { + g.image.Config.Volumes[volume] = struct{}{} +} + +// RemoveConfigVolume removes a volume from the set of directories which should be created as data volumes in a container running this image. +func (g *Generator) RemoveConfigVolume(volume string) { + delete(g.image.Config.Volumes, volume) +} + +// ConfigVolumes returns the set of directories which should be created as data volumes in a container running this image. +func (g *Generator) ConfigVolumes() map[string]struct{} { + // We have to make a copy to preserve the privacy of g.image.Config. + copy := map[string]struct{}{} + for k, v := range g.image.Config.Volumes { + copy[k] = v + } + return copy +} + +// ClearConfigLabels clears the set of arbitrary metadata for the container. +func (g *Generator) ClearConfigLabels() { + g.image.Config.Labels = map[string]string{} +} + +// AddConfigLabel adds a label to the set of arbitrary metadata for the container. 
+func (g *Generator) AddConfigLabel(label, value string) { + g.image.Config.Labels[label] = value +} + +// RemoveConfigLabel removes a label from the set of arbitrary metadata for the container. +func (g *Generator) RemoveConfigLabel(label string) { + delete(g.image.Config.Labels, label) +} + +// ConfigLabels returns the set of arbitrary metadata for the container. +func (g *Generator) ConfigLabels() map[string]string { + // We have to make a copy to preserve the privacy of g.image.Config. + copy := map[string]string{} + for k, v := range g.image.Config.Labels { + copy[k] = v + } + return copy +} + +// SetConfigWorkingDir sets the current working directory of the entrypoint process in the container. +func (g *Generator) SetConfigWorkingDir(workingDir string) { + g.image.Config.WorkingDir = workingDir +} + +// ConfigWorkingDir returns the current working directory of the entrypoint process in the container. +func (g *Generator) ConfigWorkingDir() string { + return g.image.Config.WorkingDir +} + +// SetConfigStopSignal sets the system call signal that will be sent to the container to exit. +func (g *Generator) SetConfigStopSignal(stopSignal string) { + g.image.Config.StopSignal = stopSignal +} + +// ConfigStopSignal returns the system call signal that will be sent to the container to exit. +func (g *Generator) ConfigStopSignal() string { + return g.image.Config.StopSignal +} + +// SetRootfsType sets the type of the rootfs. +func (g *Generator) SetRootfsType(rootfsType string) { + g.image.RootFS.Type = rootfsType +} + +// RootfsType returns the type of the rootfs. +func (g *Generator) RootfsType() string { + return g.image.RootFS.Type +} + +// ClearRootfsDiffIDs clears the array of layer content hashes (DiffIDs), in order from bottom-most to top-most. +func (g *Generator) ClearRootfsDiffIDs() { + g.image.RootFS.DiffIDs = []digest.Digest{} +} + +// AddRootfsDiffID appends to the array of layer content hashes (DiffIDs), in order from bottom-most to top-most. 
+func (g *Generator) AddRootfsDiffID(diffid digest.Digest) { + g.image.RootFS.DiffIDs = append(g.image.RootFS.DiffIDs, diffid) +} + +// RootfsDiffIDs returns the the array of layer content hashes (DiffIDs), in order from bottom-most to top-most. +func (g *Generator) RootfsDiffIDs() []digest.Digest { + copy := []digest.Digest{} + for _, v := range g.image.RootFS.DiffIDs { + copy = append(copy, v) + } + return copy +} + +// ClearHistory clears the history of each layer. +func (g *Generator) ClearHistory() { + g.image.History = []ispec.History{} +} + +// AddHistory appends to the history of the layers. +func (g *Generator) AddHistory(history ispec.History) { + g.image.History = append(g.image.History, history) +} + +// History returns the history of each layer. +func (g *Generator) History() []ispec.History { + copy := []ispec.History{} + for _, v := range g.image.History { + copy = append(copy, v) + } + return copy +} + +// ISO8601 represents the format of an ISO-8601 time string, which is identical +// to Go's RFC3339 specification. +const ISO8601 = time.RFC3339Nano + +// SetCreated sets the combined date and time at which the image was created. +func (g *Generator) SetCreated(created time.Time) { + g.image.Created = &created +} + +// Created gets the combined date and time at which the image was created. +func (g *Generator) Created() time.Time { + if g.image.Created == nil { + // TODO: Maybe we should be returning pointers? + return time.Time{} + } + return *g.image.Created +} + +// SetAuthor sets the name and/or email address of the person or entity which created and is responsible for maintaining the image. +func (g *Generator) SetAuthor(author string) { + g.image.Author = author +} + +// Author returns the name and/or email address of the person or entity which created and is responsible for maintaining the image. 
+func (g *Generator) Author() string { + return g.image.Author +} + +// SetArchitecture is the CPU architecture which the binaries in this image are built to run on. +func (g *Generator) SetArchitecture(arch string) { + g.image.Architecture = arch +} + +// Architecture returns the CPU architecture which the binaries in this image are built to run on. +func (g *Generator) Architecture() string { + return g.image.Architecture +} + +// SetOS sets the name of the operating system which the image is built to run on. +func (g *Generator) SetOS(os string) { + g.image.OS = os +} + +// OS returns the name of the operating system which the image is built to run on. +func (g *Generator) OS() string { + return g.image.OS +} diff --git a/vendor/github.com/openSUSE/umoci/oci/layer/README.md b/vendor/github.com/openSUSE/umoci/oci/layer/README.md new file mode 100644 index 0000000000..6e94875e52 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/layer/README.md @@ -0,0 +1,21 @@ +### `umoci/oci/layer` ### + +This is my own implementation of the [currently under development +`oci-create-layer` functions][create-layer]. The reason for implementing this +myself is that we use [`mtree` specifications][mtree] which are not the same +method that `oci-create-layer` uses. While the two implementations could be +combined (since this implementation is more general), in order to speed things +up I just decided to implement it myself. + +This also implements `oci-create-runtime-bundle`, since it's under layer +management. The real difference is that we've split up the API (and based it on +CAS) so we have more control when generating the bundle. + +I'm hoping that this will be merged upstream, but since it's just a whiteout +tar archive generator there isn't a *huge* requirement that this is kept up to +date. Though, it should be noted that [the whiteout format may change in the +future][whiteout-disc]. 
+ +[create-layer]: https://github.com/opencontainers/image-tools/pull/8 +[mtree]: https://github.com/vbatts/go-mtree +[whiteout-disc]: https://github.com/opencontainers/image-spec/issues/24 diff --git a/vendor/github.com/openSUSE/umoci/oci/layer/generate.go b/vendor/github.com/openSUSE/umoci/oci/layer/generate.go new file mode 100644 index 0000000000..f3a180a070 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/layer/generate.go @@ -0,0 +1,144 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer + +import ( + "io" + "os" + "path" + "path/filepath" + "sort" + + "github.com/apex/log" + "github.com/openSUSE/umoci/pkg/unpriv" + "github.com/pkg/errors" + "github.com/vbatts/go-mtree" +) + +// inodeDeltas is a wrapper around []mtree.InodeDelta that allows for sorting +// the set of deltas by the pathname. +type inodeDeltas []mtree.InodeDelta + +func (ids inodeDeltas) Len() int { return len(ids) } +func (ids inodeDeltas) Less(i, j int) bool { return ids[i].Path() < ids[j].Path() } +func (ids inodeDeltas) Swap(i, j int) { ids[i], ids[j] = ids[j], ids[i] } + +// GenerateLayer creates a new OCI diff layer based on the mtree diff provided. +// All of the mtree.Modified and mtree.Extra blobs are read relative to the +// provided path (which should be the rootfs of the layer that was diffed). 
The +// returned reader is for the *raw* tar data, it is the caller's responsibility +// to gzip it. +func GenerateLayer(path string, deltas []mtree.InodeDelta, opt *MapOptions) (io.ReadCloser, error) { + var mapOptions MapOptions + if opt != nil { + mapOptions = *opt + } + + reader, writer := io.Pipe() + + go func() (Err error) { + // Close with the returned error. + defer func() { + // #nosec G104 + _ = writer.CloseWithError(errors.Wrap(Err, "generate layer")) + }() + + // We can't just dump all of the file contents into a tar file. We need + // to emulate a proper tar generator. Luckily there aren't that many + // things to emulate (and we can do them all in tar.go). + tg := newTarGenerator(writer, mapOptions) + + // Sort the delta paths. + // FIXME: We need to add whiteouts first, otherwise we might end up + // doing something silly like deleting a file which we actually + // meant to modify. + sort.Sort(inodeDeltas(deltas)) + + for _, delta := range deltas { + name := delta.Path() + fullPath := filepath.Join(path, name) + + // XXX: It's possible that if we unlink a hardlink, we're going to + // AddFile() for no reason. Maybe we should drop nlink= from + // the set of keywords we care about? + + switch delta.Type() { + case mtree.Modified, mtree.Extra: + if err := tg.AddFile(name, fullPath); err != nil { + log.Warnf("generate layer: could not add file '%s': %s", name, err) + return errors.Wrap(err, "generate layer file") + } + case mtree.Missing: + if err := tg.AddWhiteout(name); err != nil { + log.Warnf("generate layer: could not add whiteout '%s': %s", name, err) + return errors.Wrap(err, "generate whiteout layer file") + } + } + } + + if err := tg.tw.Close(); err != nil { + log.Warnf("generate layer: could not close tar.Writer: %s", err) + return errors.Wrap(err, "close tar writer") + } + + return nil + }() + + return reader, nil +} + +// GenerateInsertLayer generates a completely new layer from "root"to be +// inserted into the image at "target". 
If "root" is an empty string then the +// "target" will be removed via a whiteout. +func GenerateInsertLayer(root string, target string, opaque bool, opt *MapOptions) io.ReadCloser { + root = CleanPath(root) + + var mapOptions MapOptions + if opt != nil { + mapOptions = *opt + } + + reader, writer := io.Pipe() + + go func() (Err error) { + defer func() { + // #nosec G104 + _ = writer.CloseWithError(errors.Wrap(Err, "generate layer")) + }() + + tg := newTarGenerator(writer, mapOptions) + + if opaque { + if err := tg.AddOpaqueWhiteout(target); err != nil { + return err + } + } + if root == "" { + return tg.AddWhiteout(target) + } + return unpriv.Walk(root, func(curPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + pathInTar := path.Join(target, curPath[len(root):]) + return tg.AddFile(pathInTar, curPath) + }) + }() + return reader +} diff --git a/vendor/github.com/openSUSE/umoci/oci/layer/tar_extract.go b/vendor/github.com/openSUSE/umoci/oci/layer/tar_extract.go new file mode 100644 index 0000000000..d265dd8511 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/layer/tar_extract.go @@ -0,0 +1,631 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package layer + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/apex/log" + "github.com/cyphar/filepath-securejoin" + "github.com/openSUSE/umoci/pkg/fseval" + "github.com/openSUSE/umoci/pkg/system" + "github.com/openSUSE/umoci/third_party/shared" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// inUserNamespace is a cached return value of shared.RunningInUserNS(). We +// compute this once globally rather than for each unpack. It won't change (we +// would hope) after we check it the first time. +var inUserNamespace = shared.RunningInUserNS() + +// TarExtractor represents a tar file to be extracted. +type TarExtractor struct { + // mapOptions is the set of mapping options to use when extracting + // filesystem layers. + mapOptions MapOptions + + // partialRootless indicates whether "partial rootless" tricks should be + // applied in our extraction. Rootless and userns execution have some + // similar tricks necessary, but not all rootless tricks should be applied + // when running in a userns -- hence the term "partial rootless" tricks. + partialRootless bool + + // fsEval is an fseval.FsEval used for extraction. + fsEval fseval.FsEval + + // upperPaths are paths that have either been extracted in the execution of + // this TarExtractor or are ancestors of paths extracted. The purpose of + // having this stored in-memory is to be able to handle opaque whiteouts as + // well as some other possible ordering issues with malformed archives (the + // downside of this approach is that it takes up memory -- we could switch + // to a trie if necessary). These paths are relative to the tar root but + // are fully symlink-expanded so no need to worry about that line noise. + upperPaths map[string]struct{} +} + +// NewTarExtractor creates a new TarExtractor. 
+func NewTarExtractor(opt MapOptions) *TarExtractor { + fsEval := fseval.DefaultFsEval + if opt.Rootless { + fsEval = fseval.RootlessFsEval + } + + return &TarExtractor{ + mapOptions: opt, + partialRootless: opt.Rootless || inUserNamespace, + fsEval: fsEval, + upperPaths: make(map[string]struct{}), + } +} + +// restoreMetadata applies the state described in tar.Header to the filesystem +// at the given path. No sanity checking is done of the tar.Header's pathname +// or other information. In addition, no mapping is done of the header. +func (te *TarExtractor) restoreMetadata(path string, hdr *tar.Header) error { + // Some of the tar.Header fields don't match the OS API. + fi := hdr.FileInfo() + + // Get the _actual_ file info to figure out if the path is a symlink. + isSymlink := hdr.Typeflag == tar.TypeSymlink + if realFi, err := te.fsEval.Lstat(path); err == nil { + isSymlink = realFi.Mode()&os.ModeSymlink == os.ModeSymlink + } + + // Apply the owner. If we are rootless then "user.rootlesscontainers" has + // already been set up by unmapHeader, so nothing to do here. + if !te.mapOptions.Rootless { + // XXX: While unpriv.Lchown doesn't make a whole lot of sense this + // should _probably_ be put inside FsEval. + if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + return errors.Wrapf(err, "restore chown metadata: %s", path) + } + } + + // We cannot apply hdr.Mode to symlinks, because symlinks don't have a mode + // of their own (they're special in that way). We have to apply this after + // we've applied the owner because setuid bits are cleared when changing + // owner (in rootless we don't care because we're always the owner). + if !isSymlink { + if err := te.fsEval.Chmod(path, fi.Mode()); err != nil { + return errors.Wrapf(err, "restore chmod metadata: %s", path) + } + } + + // Apply access and modified time. Note that some archives won't fill the + // atime and mtime fields, so we have to set them to a more sane value. 
+ // Otherwise Linux will start screaming at us, and nobody wants that. + mtime := hdr.ModTime + if mtime.IsZero() { + // XXX: Should we instead default to atime if it's non-zero? + mtime = time.Now() + } + atime := hdr.AccessTime + if atime.IsZero() { + // Default to the mtime. + atime = mtime + } + + // Apply xattrs. In order to make sure that we *only* have the xattr set we + // want, we first clear the set of xattrs from the file then apply the ones + // set in the tar.Header. + if err := te.fsEval.Lclearxattrs(path, ignoreXattrs); err != nil { + return errors.Wrapf(err, "clear xattr metadata: %s", path) + } + for name, value := range hdr.Xattrs { + value := []byte(value) + + // Forbidden xattrs should never be touched. + if _, skip := ignoreXattrs[name]; skip { + // If the xattr is already set to the requested value, don't bail. + // The reason for this logic is kinda convoluted, but effectively + // because restoreMetadata is called with the *on-disk* metadata we + // run the risk of things like "security.selinux" being included in + // that metadata (and thus tripping the forbidden xattr error). By + // only touching xattrs that have a different value we are somewhat + // more efficient and we don't have to special case parent restore. + // Of course this will only ever impact ignoreXattrs. + if oldValue, err := te.fsEval.Lgetxattr(path, name); err == nil { + if bytes.Equal(value, oldValue) { + log.Debugf("restore xattr metadata: skipping already-set xattr %q: %s", name, hdr.Name) + continue + } + } + if te.partialRootless { + log.Warnf("rootless{%s} ignoring forbidden xattr: %q", hdr.Name, name) + continue + } + return errors.Errorf("restore xattr metadata: saw forbidden xattr %q: %s", name, hdr.Name) + } + if err := te.fsEval.Lsetxattr(path, name, value, 0); err != nil { + // In rootless mode, some xattrs will fail (security.capability). 
+ // This is _fine_ as long as we're not running as root (in which + // case we shouldn't be ignoring xattrs that we were told to set). + // + // TODO: We should translate all security.capability capabilites + // into v3 capabilities, which allow us to write them as + // unprivileged users (we also would need to translate them + // back when creating archives). + if te.partialRootless && os.IsPermission(errors.Cause(err)) { + log.Warnf("rootless{%s} ignoring (usually) harmless EPERM on setxattr %q", hdr.Name, name) + continue + } + // We cannot do much if we get an ENOTSUP -- this usually means + // that extended attributes are simply unsupported by the + // underlying filesystem (such as AUFS or NFS). + if errors.Cause(err) == unix.ENOTSUP { + log.Warnf("xatt{%s} ignoring ENOTSUP on setxattr %q", hdr.Name, name) + continue + } + return errors.Wrapf(err, "restore xattr metadata: %s", path) + } + } + + if err := te.fsEval.Lutimes(path, atime, mtime); err != nil { + return errors.Wrapf(err, "restore lutimes metadata: %s", path) + } + + return nil +} + +// applyMetadata applies the state described in tar.Header to the filesystem at +// the given path, using the state of the TarExtractor to remap information +// within the header. This should only be used with headers from a tar layer +// (not from the filesystem). No sanity checking is done of the tar.Header's +// pathname or other information. +func (te *TarExtractor) applyMetadata(path string, hdr *tar.Header) error { + // Modify the header. + if err := unmapHeader(hdr, te.mapOptions); err != nil { + return errors.Wrap(err, "unmap header") + } + + // Restore it on the filesystme. 
+ return te.restoreMetadata(path, hdr) +} + +// isDirlink returns whether the given path is a link to a directory (or a +// dirlink in rsync(1) parlance) which is used by --keep-dirlink to see whether +// we should extract through the link or clobber the link with a directory (in +// the case where we see a directory to extract and a symlink already exists +// there). +func (te *TarExtractor) isDirlink(root string, path string) (bool, error) { + // Make sure it exists and is a symlink. + if _, err := te.fsEval.Readlink(path); err != nil { + return false, errors.Wrap(err, "read dirlink") + } + + // Technically a string.TrimPrefix would also work... + unsafePath, err := filepath.Rel(root, path) + if err != nil { + return false, errors.Wrap(err, "get relative-to-root path") + } + + // It should be noted that SecureJoin will evaluate all symlinks in the + // path, so we don't need to loop over it or anything like that. It'll just + // be done for us (in UnpackEntry only the dirname(3) is evaluated but here + // we evaluate the whole thing). + targetPath, err := securejoin.SecureJoinVFS(root, unsafePath, te.fsEval) + if err != nil { + // We hit a symlink loop -- which is fine but that means that this + // cannot be considered a dirlink. + if errno := InnerErrno(err); errno == unix.ELOOP { + err = nil + } + return false, errors.Wrap(err, "sanitize old target") + } + + targetInfo, err := te.fsEval.Lstat(targetPath) + if err != nil { + // ENOENT or similar just means that it's a broken symlink, which + // means we have to overwrite it (but it's an allowed case). + if securejoin.IsNotExist(err) { + err = nil + } + return false, err + } + + return targetInfo.IsDir(), nil +} + +// UnpackEntry extracts the given tar.Header to the provided root, ensuring +// that the layer state is consistent with the layer state that produced the +// tar archive being iterated over. 
This does handle whiteouts, so a tar.Header +// that represents a whiteout will result in the path being removed. +func (te *TarExtractor) UnpackEntry(root string, hdr *tar.Header, r io.Reader) (Err error) { + // Make the paths safe. + hdr.Name = CleanPath(hdr.Name) + root = filepath.Clean(root) + + log.WithFields(log.Fields{ + "root": root, + "path": hdr.Name, + "type": hdr.Typeflag, + }).Debugf("unpacking entry") + + // Get directory and filename, but we have to safely get the directory + // component of the path. SecureJoinVFS will evaluate the path itself, + // which we don't want (we're clever enough to handle the actual path being + // a symlink). + unsafeDir, file := filepath.Split(hdr.Name) + if filepath.Join("/", hdr.Name) == "/" { + // If we got an entry for the root, then unsafeDir is the full path. + unsafeDir, file = hdr.Name, "." + } + dir, err := securejoin.SecureJoinVFS(root, unsafeDir, te.fsEval) + if err != nil { + return errors.Wrap(err, "sanitise symlinks in root") + } + path := filepath.Join(dir, file) + + // Before we do anything, get the state of dir. Because we might be adding + // or removing files, our parent directory might be modified in the + // process. As a result, we want to be able to restore the old state + // (because we only apply state that we find in the archive we're iterating + // over). We can safely ignore an error here, because a non-existent + // directory will be fixed by later archive entries. + if dirFi, err := te.fsEval.Lstat(dir); err == nil && path != dir { + // FIXME: This is really stupid. + // #nosec G104 + link, _ := te.fsEval.Readlink(dir) + dirHdr, err := tar.FileInfoHeader(dirFi, link) + if err != nil { + return errors.Wrap(err, "convert dirFi to dirHdr") + } + + // More faking to trick restoreMetadata to actually restore the directory. + dirHdr.Typeflag = tar.TypeDir + dirHdr.Linkname = "" + + // os.Lstat doesn't get the list of xattrs by default. We need to fill + // this explicitly. 
Note that while Go's "archive/tar" takes strings, + // in Go strings can be arbitrary byte sequences so this doesn't + // restrict the possible values. + // TODO: Move this to a separate function so we can share it with + // tar_generate.go. + xattrs, err := te.fsEval.Llistxattr(dir) + if err != nil { + return errors.Wrap(err, "get dirHdr.Xattrs") + } + if len(xattrs) > 0 { + dirHdr.Xattrs = map[string]string{} + for _, xattr := range xattrs { + value, err := te.fsEval.Lgetxattr(dir, xattr) + if err != nil { + return errors.Wrap(err, "get xattr") + } + dirHdr.Xattrs[xattr] = string(value) + } + } + + // Ensure that after everything we correctly re-apply the old metadata. + // We don't map this header because we're restoring files that already + // existed on the filesystem, not from a tar layer. + defer func() { + // Only overwrite the error if there wasn't one already. + if err := te.restoreMetadata(dir, dirHdr); err != nil { + if Err == nil { + Err = errors.Wrap(err, "restore parent directory") + } + } + }() + } + + // Currently the spec doesn't specify what the hdr.Typeflag of whiteout + // files is meant to be. We specifically only produce regular files + // ('\x00') but it could be possible that someone produces a different + // Typeflag, expecting that the path is the only thing that matters in a + // whiteout entry. + if strings.HasPrefix(file, whPrefix) { + isOpaque := file == whOpaque + file = strings.TrimPrefix(file, whPrefix) + + // We have to be quite careful here. While the most intuitive way of + // handling whiteouts would be to just RemoveAll without prejudice, We + // have to be careful here. If there is a whiteout entry for a file + // *after* a normal entry (in the same layer) then the whiteout must + // not remove the new entry. We handle this by keeping track of + // whichpaths have been touched by this layer's extraction (these form + // the "upperdir"). 
We also have to handle cases where a directory has + // been marked for deletion, but a child has been extracted in this + // layer. + + path = filepath.Join(dir, file) + if isOpaque { + path = dir + } + + // If the root doesn't exist we've got nothing to do. + // XXX: We currently cannot error out if a layer asks us to remove a + // non-existent path with this implementation (because we don't + // know if it was implicitly removed by another whiteout). In + // future we could add lowerPaths that would help track whether + // another whiteout caused the removal to "fail" or if the path + // was actually missing -- which would allow us to actually error + // out here if the layer is invalid). + if _, err := te.fsEval.Lstat(path); err != nil { + // Need to use securejoin.IsNotExist to handle ENOTDIR. + if securejoin.IsNotExist(err) { + err = nil + } + return errors.Wrap(err, "check whiteout target") + } + + // Walk over the path to remove it. We remove a given path as soon as + // it isn't present in upperPaths (which includes ancestors of paths + // we've extracted so we only need to look up the one path). Otherwise + // we iterate over any children and try again. The only difference + // between opaque whiteouts and regular whiteouts is that we don't + // delete the directory itself with opaque whiteouts. + err = te.fsEval.Walk(path, func(subpath string, info os.FileInfo, err error) error { + // If we are passed an error, bail unless it's ENOENT. + if err != nil { + // If something was deleted outside of our knowledge it's not + // the end of the world. In principle this shouldn't happen + // though, so we log it for posterity. + if os.IsNotExist(errors.Cause(err)) { + log.Debugf("whiteout removal hit already-deleted path: %s", subpath) + err = filepath.SkipDir + } + return err + } + + // Get the relative form of subpath to root to match + // te.upperPaths. 
+ upperPath, err := filepath.Rel(root, subpath) + if err != nil { + return errors.Wrap(err, "find relative-to-root [should never happen]") + } + + // Remove the path only if it hasn't been touched. + if _, ok := te.upperPaths[upperPath]; !ok { + // Opaque whiteouts don't remove the directory itself, so skip + // the top-level directory. + if isOpaque && CleanPath(path) == CleanPath(subpath) { + return nil + } + + // Purge the path. We skip anything underneath (if it's a + // directory) since we just purged it -- and we don't want to + // hit ENOENT during iteration for no good reason. + err := errors.Wrap(te.fsEval.RemoveAll(subpath), "whiteout subpath") + if err == nil && info.IsDir() { + err = filepath.SkipDir + } + return err + } + return nil + }) + return errors.Wrap(err, "whiteout remove") + } + + // Get information about the path. This has to be done after we've dealt + // with whiteouts because it turns out that lstat(2) will return EPERM if + // you try to stat a whiteout on AUFS. + fi, err := te.fsEval.Lstat(path) + if err != nil { + // File doesn't exist, just switch fi to the file header. + fi = hdr.FileInfo() + } + + // Attempt to create the parent directory of the path we're unpacking. + // We do a MkdirAll here because even though you need to have a tar entry + // for every component of a new path, applyMetadata will correct any + // inconsistencies. + // FIXME: We have to make this consistent, since if the tar archive doesn't + // have entries for some of these components we won't be able to + // verify that we have consistent results during unpacking. + if err := te.fsEval.MkdirAll(dir, 0777); err != nil { + return errors.Wrap(err, "mkdir parent") + } + + isDirlink := false + // We remove whatever existed at the old path to clobber it so that + // creating a new path will not break. The only exception is if the path is + // a directory in both the layer and the current filesystem, in which case + // we don't delete it for obvious reasons. 
In all other cases we clobber. + // + // Note that this will cause hard-links in the "lower" layer to not be able + // to point to "upper" layer inodes even if the extracted type is the same + // as the old one, however it is not clear whether this is something a user + // would expect anyway. In addition, this will incorrectly deal with a + // TarLink that is present before the "upper" entry in the layer but the + // "lower" file still exists (so the hard-link would point to the old + // inode). It's not clear if such an archive is actually valid though. + if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { + // If we are in --keep-dirlinks mode and the existing fs object is a + // symlink to a directory (with the pending object is a directory), we + // don't remove the symlink (and instead allow subsequent objects to be + // just written through the symlink into the directory). This is a very + // specific usecase where layers that were generated independently from + // each other (on different base filesystems) end up with weird things + // like /lib64 being a symlink only sometimes but you never want to + // delete libraries (not just the ones that were under the "real" + // directory). + // + // TODO: This code should also handle a pending symlink entry where the + // existing object is a directory. I'm not sure how we could + // disambiguate this from a symlink-to-a-file but I imagine that + // this is something that would also be useful in the same vein + // as --keep-dirlinks (which currently only prevents clobbering + // in the opposite case). 
+ if te.mapOptions.KeepDirlinks && + fi.Mode()&os.ModeSymlink == os.ModeSymlink && hdr.Typeflag == tar.TypeDir { + isDirlink, err = te.isDirlink(root, path) + if err != nil { + return errors.Wrap(err, "check is dirlink") + } + } + if !(isDirlink && te.mapOptions.KeepDirlinks) { + if err := te.fsEval.RemoveAll(path); err != nil { + return errors.Wrap(err, "clobber old path") + } + } + } + + // Now create or otherwise modify the state of the path. Right now, either + // the type of path matches hdr or the path doesn't exist. Note that we + // don't care about umasks or the initial mode here, since applyMetadata + // will fix all of that for us. + switch hdr.Typeflag { + // regular file + case tar.TypeReg, tar.TypeRegA: + // Create a new file, then just copy the data. + fh, err := te.fsEval.Create(path) + if err != nil { + return errors.Wrap(err, "create regular") + } + defer fh.Close() + + // We need to make sure that we copy all of the bytes. + n, err := io.Copy(fh, r) + if int64(n) != hdr.Size { + err = io.ErrShortWrite + } + if err != nil { + return errors.Wrap(err, "unpack to regular file") + } + + // Force close here so that we don't affect the metadata. + if err := fh.Close(); err != nil { + return errors.Wrap(err, "close unpacked regular file") + } + + // directory + case tar.TypeDir: + if isDirlink { + break + } + + // Attempt to create the directory. We do a MkdirAll here because even + // though you need to have a tar entry for every component of a new + // path, applyMetadata will correct any inconsistencies. + if err := te.fsEval.MkdirAll(path, 0777); err != nil { + return errors.Wrap(err, "mkdirall") + } + + // hard link, symbolic link + case tar.TypeLink, tar.TypeSymlink: + linkname := hdr.Linkname + + // Hardlinks and symlinks act differently when it comes to the scoping. + // In both cases, we have to just unlink and then re-link the given + // path. But the function used and the argument are slightly different. 
+ var linkFn func(string, string) error + switch hdr.Typeflag { + case tar.TypeLink: + linkFn = te.fsEval.Link + // Because hardlinks are inode-based we need to scope the link to + // the rootfs using SecureJoinVFS. As before, we need to be careful + // that we don't resolve the last part of the link path (in case + // the user actually wanted to hardlink to a symlink). + unsafeLinkDir, linkFile := filepath.Split(CleanPath(linkname)) + linkDir, err := securejoin.SecureJoinVFS(root, unsafeLinkDir, te.fsEval) + if err != nil { + return errors.Wrap(err, "sanitise hardlink target in root") + } + linkname = filepath.Join(linkDir, linkFile) + case tar.TypeSymlink: + linkFn = te.fsEval.Symlink + } + + // Link the new one. + if err := linkFn(linkname, path); err != nil { + // FIXME: Currently this can break if tar hardlink entries occur + // before we hit the entry those hardlinks link to. I have a + // feeling that such archives are invalid, but the correct + // way of handling this is to delay link creation until the + // very end. Unfortunately this won't work with symlinks + // (which can link to directories). + return errors.Wrap(err, "link") + } + + // character device node, block device node + case tar.TypeChar, tar.TypeBlock: + // In rootless mode we have no choice but to fake this, since mknod(2) + // doesn't work as an unprivileged user here. + // + // TODO: We need to add the concept of a fake block device in + // "user.rootlesscontainers", because this workaround suffers + // from the obvious issue that if the file is touched (even the + // metadata) then it will be incorrectly copied into the layer. + // This would break distribution images fairly badly. 
+ if te.partialRootless { + log.Warnf("rootless{%s} creating empty file in place of device %d:%d", hdr.Name, hdr.Devmajor, hdr.Devminor) + fh, err := te.fsEval.Create(path) + if err != nil { + return errors.Wrap(err, "create rootless block") + } + defer fh.Close() + if err := fh.Chmod(0); err != nil { + return errors.Wrap(err, "chmod 0 rootless block") + } + goto out + } + + // Otherwise the handling is the same as a FIFO. + fallthrough + // fifo node + case tar.TypeFifo: + // We have to remove and then create the device. In the FIFO case we + // could choose not to do so, but we do it anyway just to be on the + // safe side. + + mode := system.Tarmode(hdr.Typeflag) + dev := unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor)) + + // Create the node. + if err := te.fsEval.Mknod(path, os.FileMode(int64(mode)|hdr.Mode), dev); err != nil { + return errors.Wrap(err, "mknod") + } + + // We should never hit any other headers (Go abstracts them away from us), + // and we can't handle any custom Tar extensions. So just error out. + default: + return fmt.Errorf("unpack entry: %s: unknown typeflag '\\x%x'", hdr.Name, hdr.Typeflag) + } + +out: + // Apply the metadata, which will apply any mappings necessary. We don't + // apply metadata for hardlinks, because hardlinks don't have any separate + // metadata from their link (and the tar headers might not be filled). + if hdr.Typeflag != tar.TypeLink { + if err := te.applyMetadata(path, hdr); err != nil { + return errors.Wrap(err, "apply hdr metadata") + } + } + + // Everything is done -- the path now exists. Add it (and all its + // ancestors) to the set of upper paths. We first have to figure out the + // proper path corresponding to hdr.Name though. + upperPath, err := filepath.Rel(root, path) + if err != nil { + // Really shouldn't happen because of the guarantees of SecureJoinVFS. 
+ return errors.Wrap(err, "find relative-to-root [should never happen]") + } + for pth := upperPath; pth != filepath.Dir(pth); pth = filepath.Dir(pth) { + te.upperPaths[pth] = struct{}{} + } + return nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/layer/tar_generate.go b/vendor/github.com/openSUSE/umoci/oci/layer/tar_generate.go new file mode 100644 index 0000000000..1fd7dcd5ca --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/layer/tar_generate.go @@ -0,0 +1,305 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer + +import ( + "archive/tar" + "io" + "os" + "path/filepath" + "strings" + + "github.com/apex/log" + "github.com/openSUSE/umoci/pkg/fseval" + "github.com/openSUSE/umoci/pkg/testutils" + "github.com/pkg/errors" +) + +// ignoreXattrs is a list of xattr names that should be ignored when +// creating a new image layer, because they are host-specific and/or would be a +// bad idea to unpack. They are also excluded from Lclearxattr when extracting +// an archive. +// XXX: Maybe we should make this configurable so users can manually blacklist +// (or even whitelist) xattrs that they actually want included? Like how +// GNU tar's xattr setup works. +var ignoreXattrs = map[string]struct{}{ + // SELinux doesn't allow you to set SELinux policies generically. They're + // also host-specific. 
So just ignore them during extraction. + "security.selinux": {}, + + // NFSv4 ACLs are very system-specific and shouldn't be touched by us, nor + // should they be included in images. + "system.nfs4_acl": {}, +} + +func init() { + // For test purposes we add a fake forbidden attribute that an unprivileged + // user can easily write to (and thus we can test it). + if testutils.IsTestBinary() { + ignoreXattrs["user.UMOCI:forbidden_xattr"] = struct{}{} + } +} + +// tarGenerator is a helper for generating layer diff tars. It should be noted +// that when using tarGenerator.Add{Path,Whiteout} it is recommended to do it +// in lexicographic order. +type tarGenerator struct { + tw *tar.Writer + + // mapOptions is the set of mapping options for modifying entries before + // they're added to the layer. + mapOptions MapOptions + + // Hardlink mapping. + inodes map[uint64]string + + // fsEval is an fseval.FsEval used for extraction. + fsEval fseval.FsEval + + // XXX: Should we add a saftey check to make sure we don't generate two of + // the same path in a tar archive? This is not permitted by the spec. +} + +// newTarGenerator creates a new tarGenerator using the provided writer as the +// output writer. +func newTarGenerator(w io.Writer, opt MapOptions) *tarGenerator { + fsEval := fseval.DefaultFsEval + if opt.Rootless { + fsEval = fseval.RootlessFsEval + } + + return &tarGenerator{ + tw: tar.NewWriter(w), + mapOptions: opt, + inodes: map[uint64]string{}, + fsEval: fsEval, + } +} + +// normalise converts the provided pathname to a POSIX-compliant pathname. It also will provide an error if a path looks unsafe. +func normalise(rawPath string, isDir bool) (string, error) { + // Clean up the path. + path := CleanPath(rawPath) + + // Nothing to do. + if path == "." { + return ".", nil + } + + if filepath.IsAbs(path) { + path = strings.TrimPrefix(path, "/") + } + + // Check that the path is "safe", meaning that it doesn't resolve outside + // of the tar archive. 
While this might seem paranoid, it is a legitimate + // concern. + if "/"+path != filepath.Join("/", path) { + return "", errors.Errorf("escape warning: generated path is outside tar root: %s", rawPath) + } + + // With some other tar formats, you needed to have a '/' at the end of a + // pathname in order to state that it is a directory. While this is no + // longer necessary, some older tooling may assume that. + if isDir { + path += "/" + } + + return path, nil +} + +// AddFile adds a file from the filesystem to the tar archive. It copies all of +// the relevant stat information about the file, and also attempts to track +// hardlinks. This should be functionally equivalent to adding entries with GNU +// tar. +func (tg *tarGenerator) AddFile(name, path string) error { + fi, err := tg.fsEval.Lstat(path) + if err != nil { + return errors.Wrap(err, "add file lstat") + } + + linkname := "" + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkname, err = tg.fsEval.Readlink(path); err != nil { + return errors.Wrap(err, "add file readlink") + } + } + + hdr, err := tar.FileInfoHeader(fi, linkname) + if err != nil { + return errors.Wrap(err, "convert fi to hdr") + } + hdr.Xattrs = map[string]string{} + // Usually incorrect for containers and was added in Go 1.10 causing + // changes to our output on a compiler bump... + hdr.Uname = "" + hdr.Gname = "" + + name, err = normalise(name, fi.IsDir()) + if err != nil { + return errors.Wrap(err, "normalise path") + } + hdr.Name = name + + // Make sure that we don't include any files with the name ".wh.". This + // will almost certainly confuse some users (unfortunately) but there's + // nothing we can do to store such files on-disk. + if strings.HasPrefix(filepath.Base(name), whPrefix) { + return errors.Errorf("invalid path has whiteout prefix %q: %s", whPrefix, name) + } + + // FIXME: Do we need to ensure that the parent paths have all been added to + // the archive? 
I haven't found any tar specification that makes + // this mandatory, but I have a feeling that some people might rely + // on it. The issue with implementing it is that we'd have to get + // the FileInfo about the directory from somewhere (and we don't + // want to waste space by adding an entry that will be overwritten + // later). + + // Different systems have different special things they need to set within + // a tar header. For example, device numbers are quite important to be set + // by us. + statx, err := tg.fsEval.Lstatx(path) + if err != nil { + return errors.Wrapf(err, "lstatx %q", path) + } + updateHeader(hdr, statx) + + // Set up xattrs externally to updateHeader because the function signature + // would look really dumb otherwise. + // XXX: This should probably be moved to a function in tar_unix.go. + names, err := tg.fsEval.Llistxattr(path) + if err != nil { + return errors.Wrap(err, "get xattr list") + } + for _, name := range names { + // Some xattrs need to be skipped for sanity reasons, such as + // security.selinux, because they are very much host-specific and + // carrying them to other hosts would be a really bad idea. + if _, ignore := ignoreXattrs[name]; ignore { + continue + } + // TODO: We should translate all v3 capabilities into root-owned + // capabilities here. But we don't have Go code for that yet + // (we'd need to use libcap to parse it). + value, err := tg.fsEval.Lgetxattr(path, name) + if err != nil { + // XXX: I'm not sure if we're unprivileged whether Lgetxattr can + // fail with EPERM. If it can, we should ignore it (like when + // we try to clear xattrs). + return errors.Wrapf(err, "get xattr: %s", name) + } + // https://golang.org/issues/20698 -- We don't just error out here + // because it's not _really_ a fatal error. Currently it's unclear + // whether the stdlib will correctly handle reading or disable writing + // of these PAX headers so we have to track this ourselves. 
+ if len(value) <= 0 { + log.Warnf("ignoring empty-valued xattr %s: disallowed by PAX standard", name) + continue + } + // Note that Go strings can actually be arbitrary byte sequences, so + // this conversion (while it might look a bit wrong) is actually fine. + hdr.Xattrs[name] = string(value) + } + + // Not all systems have the concept of an inode, but I'm not in the mood to + // handle this in a way that makes anything other than GNU/Linux happy + // right now. Handle hardlinks. + if oldpath, ok := tg.inodes[statx.Ino]; ok { + // We just hit a hardlink, so we just have to change the header. + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 + } else { + tg.inodes[statx.Ino] = name + } + + // Apply any header mappings. + if err := mapHeader(hdr, tg.mapOptions); err != nil { + return errors.Wrap(err, "map header") + } + if err := tg.tw.WriteHeader(hdr); err != nil { + return errors.Wrap(err, "write header") + } + + // Write the contents of regular files. + if hdr.Typeflag == tar.TypeReg { + fh, err := tg.fsEval.Open(path) + if err != nil { + return errors.Wrap(err, "open file") + } + defer fh.Close() + + n, err := io.Copy(tg.tw, fh) + if err != nil { + return errors.Wrap(err, "copy to layer") + } + if n != hdr.Size { + return errors.Wrap(io.ErrShortWrite, "copy to layer") + } + } + + return nil +} + +// whPrefix is the whiteout prefix, which is used to signify "special" files in +// an OCI image layer archive. An expanded filesystem image cannot contain +// files that have a basename starting with this prefix. +const whPrefix = ".wh." + +// whOpaque is the *full* basename of a special file which indicates that all +// siblings in a directory are to be dropped in the "lower" layer. +const whOpaque = whPrefix + whPrefix + ".opq" + +// addWhiteout adds a whiteout file for the given name inside the tar archive. +// It's not recommended to add a file with AddFile and then white it out. 
If +// you specify opaque, then the whiteout created is an opaque whiteout *for the +// directory path* given. +func (tg *tarGenerator) addWhiteout(name string, opaque bool) error { + name, err := normalise(name, false) + if err != nil { + return errors.Wrap(err, "normalise path") + } + + // Disallow having a whiteout of a whiteout, purely for our own sanity. + dir, file := filepath.Split(name) + if strings.HasPrefix(file, whPrefix) { + return errors.Errorf("invalid path has whiteout prefix %q: %s", whPrefix, name) + } + + // Figure out the whiteout name. + whiteout := filepath.Join(dir, whPrefix+file) + if opaque { + whiteout = filepath.Join(name, whOpaque) + } + + // Add a dummy header for the whiteout file. + return errors.Wrap(tg.tw.WriteHeader(&tar.Header{ + Name: whiteout, + Size: 0, + }), "write whiteout header") +} + +// AddWhiteout creates a whiteout for the provided path. +func (tg *tarGenerator) AddWhiteout(name string) error { + return tg.addWhiteout(name, false) +} + +// AddOpaqueWhiteout creates a whiteout for the provided path. +func (tg *tarGenerator) AddOpaqueWhiteout(name string) error { + return tg.addWhiteout(name, true) +} diff --git a/vendor/github.com/openSUSE/umoci/oci/layer/tar_unix.go b/vendor/github.com/openSUSE/umoci/oci/layer/tar_unix.go new file mode 100644 index 0000000000..03b3143e85 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/layer/tar_unix.go @@ -0,0 +1,33 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer + +import ( + "archive/tar" + + "golang.org/x/sys/unix" +) + +func updateHeader(hdr *tar.Header, s unix.Stat_t) { + // Currently the Go stdlib doesn't fill in the major/minor numbers of + // devices, so we have to do it manually. + if s.Mode&unix.S_IFBLK == unix.S_IFBLK || s.Mode&unix.S_IFCHR == unix.S_IFCHR { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) + } +} diff --git a/vendor/github.com/openSUSE/umoci/oci/layer/unpack.go b/vendor/github.com/openSUSE/umoci/oci/layer/unpack.go new file mode 100644 index 0000000000..af1c21e243 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/layer/unpack.go @@ -0,0 +1,456 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer + +import ( + "archive/tar" + // Import is necessary for go-digest. 
+ _ "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/apex/log" + gzip "github.com/klauspost/pgzip" + "github.com/openSUSE/umoci/oci/cas" + "github.com/openSUSE/umoci/oci/casext" + iconv "github.com/openSUSE/umoci/oci/config/convert" + "github.com/openSUSE/umoci/pkg/fseval" + "github.com/openSUSE/umoci/pkg/idtools" + "github.com/openSUSE/umoci/pkg/system" + "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + rspec "github.com/opencontainers/runtime-spec/specs-go" + rgen "github.com/opencontainers/runtime-tools/generate" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +// UnpackLayer unpacks the tar stream representing an OCI layer at the given +// root. It ensures that the state of the root is as close as possible to the +// state used to create the layer. If an error is returned, the state of root +// is undefined (unpacking is not guaranteed to be atomic). +func UnpackLayer(root string, layer io.Reader, opt *MapOptions) error { + var mapOptions MapOptions + if opt != nil { + mapOptions = *opt + } + te := NewTarExtractor(mapOptions) + tr := tar.NewReader(layer) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "read next entry") + } + if err := te.UnpackEntry(root, hdr, tr); err != nil { + return errors.Wrapf(err, "unpack entry: %s", hdr.Name) + } + } + return nil +} + +// RootfsName is the name of the rootfs directory inside the bundle path when +// generated. +const RootfsName = "rootfs" + +// isLayerType returns if the given MediaType is the media type of an image +// layer blob. This includes both distributable and non-distributable images. 
+func isLayerType(mediaType string) bool { + return mediaType == ispec.MediaTypeImageLayer || mediaType == ispec.MediaTypeImageLayerNonDistributable || + mediaType == ispec.MediaTypeImageLayerGzip || mediaType == ispec.MediaTypeImageLayerNonDistributableGzip +} + +func needsGunzip(mediaType string) bool { + return mediaType == ispec.MediaTypeImageLayerGzip || mediaType == ispec.MediaTypeImageLayerNonDistributableGzip +} + +// UnpackManifest extracts all of the layers in the given manifest, as well as +// generating a runtime bundle and configuration. The rootfs is extracted to +// /. +// +// FIXME: This interface is ugly. +func UnpackManifest(ctx context.Context, engine cas.Engine, bundle string, manifest ispec.Manifest, opt *MapOptions) (err error) { + // Create the bundle directory. We only error out if config.json or rootfs/ + // already exists, because we cannot be sure that the user intended us to + // extract over an existing bundle. + if err := os.MkdirAll(bundle, 0755); err != nil { + return errors.Wrap(err, "mkdir bundle") + } + // We change the mode of the bundle directory to 0700. A user can easily + // change this after-the-fact, but we do this explicitly to avoid cases + // where an unprivileged user could recurse into an otherwise unsafe image + // (giving them potential root access through setuid binaries for example). + if err := os.Chmod(bundle, 0700); err != nil { + return errors.Wrap(err, "chmod bundle 0700") + } + + configPath := filepath.Join(bundle, "config.json") + rootfsPath := filepath.Join(bundle, RootfsName) + + if _, err := os.Lstat(configPath); !os.IsNotExist(err) { + if err == nil { + err = fmt.Errorf("config.json already exists") + } + return errors.Wrap(err, "bundle path empty") + } + + defer func() { + if err != nil { + fsEval := fseval.DefaultFsEval + if opt != nil && opt.Rootless { + fsEval = fseval.RootlessFsEval + } + // It's too late to care about errors. 
+ // #nosec G104 + _ = fsEval.RemoveAll(rootfsPath) + } + }() + + if _, err := os.Lstat(rootfsPath); !os.IsNotExist(err) { + if err == nil { + err = fmt.Errorf("%s already exists", rootfsPath) + } + return err + } + + log.Infof("unpack rootfs: %s", rootfsPath) + if err := UnpackRootfs(ctx, engine, rootfsPath, manifest, opt); err != nil { + return errors.Wrap(err, "unpack rootfs") + } + + // Generate a runtime configuration file from ispec.Image. + configFile, err := os.Create(configPath) + if err != nil { + return errors.Wrap(err, "open config.json") + } + defer configFile.Close() + + if err := UnpackRuntimeJSON(ctx, engine, configFile, rootfsPath, manifest, opt); err != nil { + return errors.Wrap(err, "unpack config.json") + } + return nil +} + +// UnpackRootfs extracts all of the layers in the given manifest. +// Some verification is done during image extraction. +func UnpackRootfs(ctx context.Context, engine cas.Engine, rootfsPath string, manifest ispec.Manifest, opt *MapOptions) (err error) { + engineExt := casext.NewEngine(engine) + + if err := os.Mkdir(rootfsPath, 0755); err != nil && !os.IsExist(err) { + return errors.Wrap(err, "mkdir rootfs") + } + + // In order to avoid having a broken rootfs in the case of an error, we + // remove the rootfs. In the case of rootless this is particularly + // important (`rm -rf` won't work on most distro rootfs's). + defer func() { + if err != nil { + fsEval := fseval.DefaultFsEval + if opt != nil && opt.Rootless { + fsEval = fseval.RootlessFsEval + } + // It's too late to care about errors. + // #nosec G104 + _ = fsEval.RemoveAll(rootfsPath) + } + }() + + // Make sure that the owner is correct. 
+ rootUID, err := idtools.ToHost(0, opt.UIDMappings) + if err != nil { + return errors.Wrap(err, "ensure rootuid has mapping") + } + rootGID, err := idtools.ToHost(0, opt.GIDMappings) + if err != nil { + return errors.Wrap(err, "ensure rootgid has mapping") + } + if err := os.Lchown(rootfsPath, rootUID, rootGID); err != nil { + return errors.Wrap(err, "chown rootfs") + } + + // Currently, many different images in the wild don't specify what the + // atime/mtime of the root directory is. This is a huge pain because it + // means that we can't ensure consistent unpacking. In order to get around + // this, we first set the mtime of the root directory to the Unix epoch + // (which is as good of an arbitrary choice as any). + epoch := time.Unix(0, 0) + if err := system.Lutimes(rootfsPath, epoch, epoch); err != nil { + return errors.Wrap(err, "set initial root time") + } + + // In order to verify the DiffIDs as we extract layers, we have to get the + // .Config blob first. But we can't extract it (generate the runtime + // config) until after we have the full rootfs generated. + configBlob, err := engineExt.FromDescriptor(ctx, manifest.Config) + if err != nil { + return errors.Wrap(err, "get config blob") + } + defer configBlob.Close() + if configBlob.Descriptor.MediaType != ispec.MediaTypeImageConfig { + return errors.Errorf("unpack rootfs: config blob is not correct mediatype %s: %s", ispec.MediaTypeImageConfig, configBlob.Descriptor.MediaType) + } + config, ok := configBlob.Data.(ispec.Image) + if !ok { + // Should _never_ be reached. + return errors.Errorf("[internal error] unknown config blob type: %s", configBlob.Descriptor.MediaType) + } + + // We can't understand non-layer images. + if config.RootFS.Type != "layers" { + return errors.Errorf("unpack rootfs: config: unsupported rootfs.type: %s", config.RootFS.Type) + } + + // Layer extraction. 
+ for idx, layerDescriptor := range manifest.Layers { + layerDiffID := config.RootFS.DiffIDs[idx] + log.Infof("unpack layer: %s", layerDescriptor.Digest) + + layerBlob, err := engineExt.FromDescriptor(ctx, layerDescriptor) + if err != nil { + return errors.Wrap(err, "get layer blob") + } + defer layerBlob.Close() + if !isLayerType(layerBlob.Descriptor.MediaType) { + return errors.Errorf("unpack rootfs: layer %s: blob is not correct mediatype: %s", layerBlob.Descriptor.Digest, layerBlob.Descriptor.MediaType) + } + layerData, ok := layerBlob.Data.(io.ReadCloser) + if !ok { + // Should _never_ be reached. + return errors.Errorf("[internal error] layerBlob was not an io.ReadCloser") + } + + layerRaw := layerData + if needsGunzip(layerBlob.Descriptor.MediaType) { + // We have to extract a gzip'd version of the above layer. Also note + // that we have to check the DiffID we're extracting (which is the + // sha256 sum of the *uncompressed* layer). + layerRaw, err = gzip.NewReader(layerData) + if err != nil { + return errors.Wrap(err, "create gzip reader") + } + } + + layerDigester := digest.SHA256.Digester() + layer := io.TeeReader(layerRaw, layerDigester.Hash()) + + if err := UnpackLayer(rootfsPath, layer, opt); err != nil { + return errors.Wrap(err, "unpack layer") + } + // Different tar implementations can have different levels of redundant + // padding and other similar weird behaviours. While on paper they are + // all entirely valid archives, Go's tar.Reader implementation doesn't + // guarantee that the entire stream will be consumed (which can result + // in the later diff_id check failing because the digester didn't get + // the whole uncompressed stream). Just blindly consume anything left + // in the layer. 
+ if _, err = io.Copy(ioutil.Discard, layer); err != nil { + return errors.Wrap(err, "discard trailing archive bits") + } + if err := layerData.Close(); err != nil { + return errors.Wrap(err, "close layer data") + } + + layerDigest := layerDigester.Digest() + if layerDigest != layerDiffID { + return errors.Errorf("unpack manifest: layer %s: diffid mismatch: got %s expected %s", layerDescriptor.Digest, layerDigest, layerDiffID) + } + } + + return nil +} + +// UnpackRuntimeJSON converts a given manifest's configuration to a runtime +// configuration and writes it to the given writer. If rootfs is specified, it +// is sourced during the configuration generation (for conversion of +// Config.User and other similar jobs -- which will error out if the user could +// not be parsed). If rootfs is not specified (is an empty string) then all +// conversions that require sourcing the rootfs will be set to their default +// values. +// +// XXX: I don't like this API. It has way too many arguments. +func UnpackRuntimeJSON(ctx context.Context, engine cas.Engine, configFile io.Writer, rootfs string, manifest ispec.Manifest, opt *MapOptions) error { + engineExt := casext.NewEngine(engine) + + var mapOptions MapOptions + if opt != nil { + mapOptions = *opt + } + + // In order to verify the DiffIDs as we extract layers, we have to get the + // .Config blob first. But we can't extract it (generate the runtime + // config) until after we have the full rootfs generated. + configBlob, err := engineExt.FromDescriptor(ctx, manifest.Config) + if err != nil { + return errors.Wrap(err, "get config blob") + } + defer configBlob.Close() + if configBlob.Descriptor.MediaType != ispec.MediaTypeImageConfig { + return errors.Errorf("unpack manifest: config blob is not correct mediatype %s: %s", ispec.MediaTypeImageConfig, configBlob.Descriptor.MediaType) + } + config, ok := configBlob.Data.(ispec.Image) + if !ok { + // Should _never_ be reached. 
+ return errors.Errorf("[internal error] unknown config blob type: %s", configBlob.Descriptor.MediaType) + } + + g, err := rgen.New(runtime.GOOS) + if err != nil { + return errors.Wrap(err, "create config.json generator") + } + if err := iconv.MutateRuntimeSpec(g, rootfs, config); err != nil { + return errors.Wrap(err, "generate config.json") + } + + // Add UIDMapping / GIDMapping options. + if len(mapOptions.UIDMappings) > 0 || len(mapOptions.GIDMappings) > 0 { + // #nosec G104 + _ = g.AddOrReplaceLinuxNamespace("user", "") + } + g.ClearLinuxUIDMappings() + for _, m := range mapOptions.UIDMappings { + g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size) + } + g.ClearLinuxGIDMappings() + for _, m := range mapOptions.GIDMappings { + g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size) + } + if mapOptions.Rootless { + ToRootless(g.Spec()) + const resolvConf = "/etc/resolv.conf" + // If we are using user namespaces, then we must make sure that we + // don't drop any of the CL_UNPRIVILEGED "locked" flags of the source + // "mount" when we bind-mount. The reason for this is that at the point + // when runc sets up the root filesystem, it is already inside a user + // namespace, and thus cannot change any flags that are locked. + unprivOpts, err := getUnprivilegedMountFlags(resolvConf) + if err != nil { + return errors.Wrapf(err, "inspecting mount flags of %s", resolvConf) + } + g.AddMount(rspec.Mount{ + Destination: resolvConf, + Source: resolvConf, + Type: "none", + Options: append(unprivOpts, []string{"bind", "ro"}...), + }) + } + + // Save the config.json. + if err := g.Save(configFile, rgen.ExportOptions{}); err != nil { + return errors.Wrap(err, "write config.json") + } + return nil +} + +// ToRootless converts a specification to a version that works with rootless +// containers. This is done by removing options and other settings that clash +// with unprivileged user namespaces. 
+func ToRootless(spec *rspec.Spec) { + var namespaces []rspec.LinuxNamespace + + // Remove additional groups. + spec.Process.User.AdditionalGids = nil + + // Remove networkns from the spec. + for _, ns := range spec.Linux.Namespaces { + switch ns.Type { + case rspec.NetworkNamespace, rspec.UserNamespace: + // Do nothing. + default: + namespaces = append(namespaces, ns) + } + } + // Add userns to the spec. + namespaces = append(namespaces, rspec.LinuxNamespace{ + Type: rspec.UserNamespace, + }) + spec.Linux.Namespaces = namespaces + + // Fix up mounts. + var mounts []rspec.Mount + for _, mount := range spec.Mounts { + // Ignore all mounts that are under /sys. + if strings.HasPrefix(mount.Destination, "/sys") { + continue + } + + // Remove all gid= and uid= mappings. + var options []string + for _, option := range mount.Options { + if !strings.HasPrefix(option, "gid=") && !strings.HasPrefix(option, "uid=") { + options = append(options, option) + } + } + + mount.Options = options + mounts = append(mounts, mount) + } + // Add the sysfs mount as an rbind. + mounts = append(mounts, rspec.Mount{ + Source: "/sys", + Destination: "/sys", + Type: "none", + Options: []string{"rbind", "nosuid", "noexec", "nodev", "ro"}, + }) + spec.Mounts = mounts + + // Remove cgroup settings. + spec.Linux.Resources = nil +} + +// Get the set of mount flags that are set on the mount that contains the given +// path and are locked by CL_UNPRIVILEGED. This is necessary to ensure that +// bind-mounting "with options" will not fail with user namespaces, due to +// kernel restrictions that require user namespace mounts to preserve +// CL_UNPRIVILEGED locked flags. +// +// Ported from https://github.com/moby/moby/pull/35205 +func getUnprivilegedMountFlags(path string) ([]string, error) { + var statfs unix.Statfs_t + if err := unix.Statfs(path, &statfs); err != nil { + return nil, err + } + + // The set of keys come from https://github.com/torvalds/linux/blob/v4.13/fs/namespace.c#L1034-L1048. 
+ unprivilegedFlags := map[uint64]string{ + unix.MS_RDONLY: "ro", + unix.MS_NODEV: "nodev", + unix.MS_NOEXEC: "noexec", + unix.MS_NOSUID: "nosuid", + unix.MS_NOATIME: "noatime", + unix.MS_RELATIME: "relatime", + unix.MS_NODIRATIME: "nodiratime", + } + + var flags []string + for mask, flag := range unprivilegedFlags { + if uint64(statfs.Flags)&mask == mask { + flags = append(flags, flag) + } + } + + return flags, nil +} diff --git a/vendor/github.com/openSUSE/umoci/oci/layer/utils.go b/vendor/github.com/openSUSE/umoci/oci/layer/utils.go new file mode 100644 index 0000000000..e2d75ce35e --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/oci/layer/utils.go @@ -0,0 +1,234 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer + +import ( + "archive/tar" + "os" + "path/filepath" + + "github.com/apex/log" + "github.com/golang/protobuf/proto" + "github.com/openSUSE/umoci/pkg/idtools" + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + rootlesscontainers "github.com/rootless-containers/proto/go-proto" +) + +// MapOptions specifies the UID and GID mappings used when unpacking and +// repacking images. +type MapOptions struct { + // UIDMappings and GIDMappings are the UID and GID mappings to apply when + // packing and unpacking image rootfs layers. 
+	UIDMappings []rspec.LinuxIDMapping `json:"uid_mappings"`
+	GIDMappings []rspec.LinuxIDMapping `json:"gid_mappings"`
+
+	// Rootless specifies whether or not to error out if chown fails.
+	Rootless bool `json:"rootless"`
+
+	// KeepDirlinks is essentially the same as rsync's option
+	// --keep-dirlinks: if, on extraction, a directory would be created
+	// where a symlink to a directory previously existed, KeepDirlinks
+	// doesn't create that directory, but instead just uses the existing
+	// symlink.
+	KeepDirlinks bool `json:"-"`
+}
+
+// mapHeader maps a tar.Header generated from the filesystem so that it
+// describes the inode as it would be observed by a container process. In
+// particular this involves applying an ID mapping from the host filesystem to the
+// container mappings. Returns an error if it's not possible to map the given
+// UID.
+func mapHeader(hdr *tar.Header, mapOptions MapOptions) error {
+	var newUID, newGID int
+
+	// It only makes sense to do un-mapping if we're not rootless. If we're
+	// rootless then all of the files will be owned by us anyway.
+	if !mapOptions.Rootless {
+		var err error
+		newUID, err = idtools.ToContainer(hdr.Uid, mapOptions.UIDMappings)
+		if err != nil {
+			return errors.Wrap(err, "map uid to container")
+		}
+		newGID, err = idtools.ToContainer(hdr.Gid, mapOptions.GIDMappings)
+		if err != nil {
+			return errors.Wrap(err, "map gid to container")
+		}
+	}
+
+	// We have special handling for the "user.rootlesscontainers" xattr. If
+	// we're rootless then we override the owner of the file we're currently
+	// parsing (and then remove the xattr). If we're not rootless then the user
+	// is doing something strange, so we log a warning but just ignore the
+	// xattr otherwise.
+	//
+	// TODO: We should probably add a flag to opt-out of this (though I'm not
+	// sure why anyone would intentionally use this incorrectly).
+ if value, ok := hdr.Xattrs[rootlesscontainers.Keyname]; !ok { + // noop + } else if !mapOptions.Rootless { + log.Warnf("suspicious filesystem: saw special rootless xattr %s in non-rootless invocation", rootlesscontainers.Keyname) + } else { + var payload rootlesscontainers.Resource + if err := proto.Unmarshal([]byte(value), &payload); err != nil { + return errors.Wrap(err, "unmarshal rootlesscontainers payload") + } + + // If the payload isn't uint32(-1) we apply it. The xattr includes the + // *in-container* owner so we don't want to map it. + if uid := payload.GetUid(); uid != rootlesscontainers.NoopID { + newUID = int(uid) + } + if gid := payload.GetGid(); gid != rootlesscontainers.NoopID { + newGID = int(gid) + } + + // Drop the xattr since it's just a marker for us and shouldn't be in + // layers. This is technically out-of-spec, but so is + // "user.rootlesscontainers". + delete(hdr.Xattrs, rootlesscontainers.Keyname) + } + + hdr.Uid = newUID + hdr.Gid = newGID + return nil +} + +// unmapHeader maps a tar.Header from a tar layer stream so that it describes +// the inode as it would be exist on the host filesystem. In particular this +// involves applying an ID mapping from the container filesystem to the host +// mappings. Returns an error if it's not possible to map the given UID. +func unmapHeader(hdr *tar.Header, mapOptions MapOptions) error { + // To avoid nil references. + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + + // If there is already a "user.rootlesscontainers" we give a warning in + // both rootless and root cases -- but in rootless we explicitly delete the + // entry because we might replace it. 
+ if _, ok := hdr.Xattrs[rootlesscontainers.Keyname]; ok { + if mapOptions.Rootless { + log.Warnf("rootless{%s} ignoring special xattr %s stored in layer", hdr.Name, rootlesscontainers.Keyname) + delete(hdr.Xattrs, rootlesscontainers.Keyname) + } else { + log.Warnf("suspicious layer: saw special xattr %s in non-rootless invocation", rootlesscontainers.Keyname) + } + } + + // In rootless mode there are a few things we need to do. We need to map + // all of the files in the layer to have an owner of (0, 0) because we + // cannot lchown(2) anything -- and then if the owner was non-root we have + // to create a "user.rootlesscontainers" xattr for it. + if mapOptions.Rootless { + // Fill the rootlesscontainers payload with the original (uid, gid). If + // either is 0, we replace it with uint32(-1). Technically we could + // just leave it as 0 (since that is what the source of truth told us + // the owner was), but this would result in a massive increase in + // xattrs with no real benefit. + payload := rootlesscontainers.Resource{ + Uid: rootlesscontainers.NoopID, + Gid: rootlesscontainers.NoopID, + } + if uid := hdr.Uid; uid != 0 { + payload.Uid = uint32(uid) + } + if gid := hdr.Gid; gid != 0 { + payload.Gid = uint32(gid) + } + + // Don't add the xattr if the owner isn't just (0, 0) because that's a + // waste of space. + if !rootlesscontainers.IsDefault(payload) { + valueBytes, err := proto.Marshal(&payload) + if err != nil { + return errors.Wrap(err, "marshal rootlesscontainers payload") + } + // While the payload is almost certainly not UTF-8, Go strings can + // actually be arbitrary bytes (in case you didn't know this and + // were confused like me when this worked). See + // for more detail. 
+ hdr.Xattrs[rootlesscontainers.Keyname] = string(valueBytes) + } + + hdr.Uid = 0 + hdr.Gid = 0 + } + + newUID, err := idtools.ToHost(hdr.Uid, mapOptions.UIDMappings) + if err != nil { + return errors.Wrap(err, "map uid to host") + } + newGID, err := idtools.ToHost(hdr.Gid, mapOptions.GIDMappings) + if err != nil { + return errors.Wrap(err, "map gid to host") + } + + hdr.Uid = newUID + hdr.Gid = newGID + return nil +} + +// CleanPath makes a path safe for use with filepath.Join. This is done by not +// only cleaning the path, but also (if the path is relative) adding a leading +// '/' and cleaning it (then removing the leading '/'). This ensures that a +// path resulting from prepending another path will always resolve to lexically +// be a subdirectory of the prefixed path. This is all done lexically, so paths +// that include symlinks won't be safe as a result of using CleanPath. +// +// This function comes from runC (libcontainer/utils/utils.go). +func CleanPath(path string) string { + // Deal with empty strings nicely. + if path == "" { + return "" + } + + // Ensure that all paths are cleaned (especially problematic ones like + // "/../../../../../" which can cause lots of issues). + path = filepath.Clean(path) + + // If the path isn't absolute, we need to do more processing to fix paths + // such as "../../../..//some/path". We also shouldn't convert absolute + // paths to relative ones. + if !filepath.IsAbs(path) { + path = filepath.Clean(string(os.PathSeparator) + path) + // This can't fail, as (by definition) all paths are relative to root. + // #nosec G104 + path, _ = filepath.Rel(string(os.PathSeparator), path) + } + + // Clean the path again for good measure. + return filepath.Clean(path) +} + +// InnerErrno returns the "real" system error from an error that originally +// came from the "os" package. The returned error can be compared directly with +// unix.* (or syscall.*) errno values. 
If the type could not be detected we just return +func InnerErrno(err error) error { + // All of the os.* cases as well as an explicit + errno := errors.Cause(err) + switch err := errno.(type) { + case *os.PathError: + errno = err.Err + case *os.LinkError: + errno = err.Err + case *os.SyscallError: + errno = err.Err + } + return errno +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval.go b/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval.go new file mode 100644 index 0000000000..bc8f1d5d36 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval.go @@ -0,0 +1,102 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fseval + +import ( + "os" + "path/filepath" + "time" + + "github.com/vbatts/go-mtree" + "golang.org/x/sys/unix" +) + +// Ensure that mtree.FsEval is implemented by FsEval. +var _ mtree.FsEval = DefaultFsEval +var _ mtree.FsEval = RootlessFsEval + +// FsEval is a super-interface that implements everything required for +// mtree.FsEval as well as including all of the imporant os.* wrapper functions +// needed for "oci/layers".tarExtractor. +type FsEval interface { + // Open is equivalent to os.Open. + Open(path string) (*os.File, error) + + // Create is equivalent to os.Create. + Create(path string) (*os.File, error) + + // Readdir is equivalent to os.Readdir. 
+ Readdir(path string) ([]os.FileInfo, error) + + // Lstat is equivalent to os.Lstat. + Lstat(path string) (os.FileInfo, error) + + // Lstatx is equivalent to unix.Lstat. + Lstatx(path string) (unix.Stat_t, error) + + // Readlink is equivalent to os.Readlink. + Readlink(path string) (string, error) + + // Symlink is equivalent to os.Symlink. + Symlink(linkname, path string) error + + // Link is equivalent to os.Link. + Link(linkname, path string) error + + // Chmod is equivalent to os.Chmod. + Chmod(path string, mode os.FileMode) error + + // Lutimes is equivalent to os.Lutimes. + Lutimes(path string, atime, mtime time.Time) error + + // Remove is equivalent to os.Remove. + Remove(path string) error + + // RemoveAll is equivalent to os.RemoveAll. + RemoveAll(path string) error + + // Mkdir is equivalent to os.Mkdir. + Mkdir(path string, perm os.FileMode) error + + // MkdirAll is equivalent to os.MkdirAll. + MkdirAll(path string, perm os.FileMode) error + + // Mknod is equivalent to unix.Mknod. + Mknod(path string, mode os.FileMode, dev uint64) error + + // Llistxattr is equivalent to system.Llistxattr + Llistxattr(path string) ([]string, error) + + // Lremovexattr is equivalent to system.Lremovexattr + Lremovexattr(path, name string) error + + // Lsetxattr is equivalent to system.Lsetxattr + Lsetxattr(path, name string, value []byte, flags int) error + + // Lgetxattr is equivalent to system.Lgetxattr + Lgetxattr(path string, name string) ([]byte, error) + + // Lclearxattrs is equivalent to system.Lclearxattrs + Lclearxattrs(path string, except map[string]struct{}) error + + // KeywordFunc returns a wrapper around the given mtree.KeywordFunc. + KeywordFunc(fn mtree.KeywordFunc) mtree.KeywordFunc + + // Walk is equivalent to filepath.Walk. 
+ Walk(root string, fn filepath.WalkFunc) error +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval_default.go b/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval_default.go new file mode 100644 index 0000000000..4eae0aeef6 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval_default.go @@ -0,0 +1,154 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fseval + +import ( + "os" + "path/filepath" + "time" + + "github.com/openSUSE/umoci/pkg/system" + "github.com/vbatts/go-mtree" + "golang.org/x/sys/unix" +) + +// DefaultFsEval is the "identity" form of FsEval. In particular, it does not +// do any trickery and calls directly to the relevant os.* functions (and does +// not wrap KeywordFunc). This should be used by default, because there are no +// weird side-effects. +var DefaultFsEval FsEval = osFsEval(0) + +// osFsEval is a hack to be able to make DefaultFsEval a const. +type osFsEval int + +// Open is equivalent to os.Open. +func (fs osFsEval) Open(path string) (*os.File, error) { + return os.Open(path) +} + +// Create is equivalent to os.Create. +func (fs osFsEval) Create(path string) (*os.File, error) { + return os.Create(path) +} + +// Readdir is equivalent to os.Readdir. 
+func (fs osFsEval) Readdir(path string) ([]os.FileInfo, error) { + fh, err := os.Open(path) + if err != nil { + return nil, err + } + defer fh.Close() + return fh.Readdir(-1) +} + +// Lstat is equivalent to os.Lstat. +func (fs osFsEval) Lstat(path string) (os.FileInfo, error) { + return os.Lstat(path) +} + +// Lstatx is equivalent to unix.Lstat. +func (fs osFsEval) Lstatx(path string) (unix.Stat_t, error) { + var s unix.Stat_t + err := unix.Lstat(path, &s) + return s, err +} + +// Readlink is equivalent to os.Readlink. +func (fs osFsEval) Readlink(path string) (string, error) { + return os.Readlink(path) +} + +// Symlink is equivalent to os.Symlink. +func (fs osFsEval) Symlink(linkname, path string) error { + return os.Symlink(linkname, path) +} + +// Link is equivalent to os.Link. +func (fs osFsEval) Link(linkname, path string) error { + return os.Link(linkname, path) +} + +// Chmod is equivalent to os.Chmod. +func (fs osFsEval) Chmod(path string, mode os.FileMode) error { + return os.Chmod(path, mode) +} + +// Lutimes is equivalent to os.Lutimes. +func (fs osFsEval) Lutimes(path string, atime, mtime time.Time) error { + return system.Lutimes(path, atime, mtime) +} + +// Remove is equivalent to os.Remove. +func (fs osFsEval) Remove(path string) error { + return os.Remove(path) +} + +// RemoveAll is equivalent to os.RemoveAll. +func (fs osFsEval) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +// Mkdir is equivalent to os.Mkdir. +func (fs osFsEval) Mkdir(path string, perm os.FileMode) error { + return os.Mkdir(path, perm) +} + +// Mknod is equivalent to unix.Mknod. +func (fs osFsEval) Mknod(path string, mode os.FileMode, dev uint64) error { + return unix.Mknod(path, uint32(mode), int(dev)) +} + +// MkdirAll is equivalent to os.MkdirAll. 
+func (fs osFsEval) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// Llistxattr is equivalent to system.Llistxattr +func (fs osFsEval) Llistxattr(path string) ([]string, error) { + return system.Llistxattr(path) +} + +// Lremovexattr is equivalent to system.Lremovexattr +func (fs osFsEval) Lremovexattr(path, name string) error { + return unix.Lremovexattr(path, name) +} + +// Lsetxattr is equivalent to system.Lsetxattr +func (fs osFsEval) Lsetxattr(path, name string, value []byte, flags int) error { + return unix.Lsetxattr(path, name, value, flags) +} + +// Lgetxattr is equivalent to system.Lgetxattr +func (fs osFsEval) Lgetxattr(path string, name string) ([]byte, error) { + return system.Lgetxattr(path, name) +} + +// Lclearxattrs is equivalent to system.Lclearxattrs +func (fs osFsEval) Lclearxattrs(path string, except map[string]struct{}) error { + return system.Lclearxattrs(path, except) +} + +// KeywordFunc returns a wrapper around the given mtree.KeywordFunc. +func (fs osFsEval) KeywordFunc(fn mtree.KeywordFunc) mtree.KeywordFunc { + return fn +} + +// Walk is equivalent to filepath.Walk. +func (fs osFsEval) Walk(root string, fn filepath.WalkFunc) error { + return filepath.Walk(root, fn) +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval_rootless.go b/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval_rootless.go new file mode 100644 index 0000000000..ec94f25cfe --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/fseval/fseval_rootless.go @@ -0,0 +1,156 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package fseval + +import ( + "io" + "os" + "path/filepath" + "time" + + "github.com/openSUSE/umoci/pkg/unpriv" + "github.com/vbatts/go-mtree" + "golang.org/x/sys/unix" +) + +// RootlessFsEval is an FsEval implementation that uses "umoci/pkg/unpriv".* +// functions in order to provide the ability for unprivileged users (those +// without CAP_DAC_OVERRIDE and CAP_DAC_READ_SEARCH) to evaluate parts of a +// filesystem that they own. Note that by necessity this requires modifying the +// filesystem (and thus will not work on read-only filesystems). +var RootlessFsEval FsEval = unprivFsEval(0) + +// unprivFsEval is a hack to be able to make RootlessFsEval a const. +type unprivFsEval int + +// Open is equivalent to unpriv.Open. +func (fs unprivFsEval) Open(path string) (*os.File, error) { + return unpriv.Open(path) +} + +// Create is equivalent to unpriv.Create. +func (fs unprivFsEval) Create(path string) (*os.File, error) { + return unpriv.Create(path) +} + +// Readdir is equivalent to unpriv.Readdir. +func (fs unprivFsEval) Readdir(path string) ([]os.FileInfo, error) { + return unpriv.Readdir(path) +} + +// Lstat is equivalent to unpriv.Lstat. +func (fs unprivFsEval) Lstat(path string) (os.FileInfo, error) { + return unpriv.Lstat(path) +} + +func (fs unprivFsEval) Lstatx(path string) (unix.Stat_t, error) { + return unpriv.Lstatx(path) +} + +// Readlink is equivalent to unpriv.Readlink. +func (fs unprivFsEval) Readlink(path string) (string, error) { + return unpriv.Readlink(path) +} + +// Symlink is equivalent to unpriv.Symlink. 
+func (fs unprivFsEval) Symlink(linkname, path string) error { + return unpriv.Symlink(linkname, path) +} + +// Link is equivalent to unpriv.Link. +func (fs unprivFsEval) Link(linkname, path string) error { + return unpriv.Link(linkname, path) +} + +// Chmod is equivalent to unpriv.Chmod. +func (fs unprivFsEval) Chmod(path string, mode os.FileMode) error { + return unpriv.Chmod(path, mode) +} + +// Lutimes is equivalent to unpriv.Lutimes. +func (fs unprivFsEval) Lutimes(path string, atime, mtime time.Time) error { + return unpriv.Lutimes(path, atime, mtime) +} + +// Remove is equivalent to unpriv.Remove. +func (fs unprivFsEval) Remove(path string) error { + return unpriv.Remove(path) +} + +// RemoveAll is equivalent to unpriv.RemoveAll. +func (fs unprivFsEval) RemoveAll(path string) error { + return unpriv.RemoveAll(path) +} + +// Mkdir is equivalent to unpriv.Mkdir. +func (fs unprivFsEval) Mkdir(path string, perm os.FileMode) error { + return unpriv.Mkdir(path, perm) +} + +// Mknod is equivalent to unpriv.Mknod. +func (fs unprivFsEval) Mknod(path string, mode os.FileMode, dev uint64) error { + return unpriv.Mknod(path, mode, dev) +} + +// MkdirAll is equivalent to unpriv.MkdirAll. 
+func (fs unprivFsEval) MkdirAll(path string, perm os.FileMode) error { + return unpriv.MkdirAll(path, perm) +} + +// Llistxattr is equivalent to unpriv.Llistxattr +func (fs unprivFsEval) Llistxattr(path string) ([]string, error) { + return unpriv.Llistxattr(path) +} + +// Lremovexattr is equivalent to unpriv.Lremovexattr +func (fs unprivFsEval) Lremovexattr(path, name string) error { + return unpriv.Lremovexattr(path, name) +} + +// Lsetxattr is equivalent to unpriv.Lsetxattr +func (fs unprivFsEval) Lsetxattr(path, name string, value []byte, flags int) error { + return unpriv.Lsetxattr(path, name, value, flags) +} + +// Lgetxattr is equivalent to unpriv.Lgetxattr +func (fs unprivFsEval) Lgetxattr(path string, name string) ([]byte, error) { + return unpriv.Lgetxattr(path, name) +} + +// Lclearxattrs is equivalent to unpriv.Lclearxattrs +func (fs unprivFsEval) Lclearxattrs(path string, except map[string]struct{}) error { + return unpriv.Lclearxattrs(path, except) +} + +// KeywordFunc returns a wrapper around the given mtree.KeywordFunc. +func (fs unprivFsEval) KeywordFunc(fn mtree.KeywordFunc) mtree.KeywordFunc { + return func(path string, info os.FileInfo, r io.Reader) ([]mtree.KeyVal, error) { + var kv []mtree.KeyVal + err := unpriv.Wrap(path, func(path string) error { + var err error + kv, err = fn(path, info, r) + return err + }) + return kv, err + } +} + +// Walk is equivalent to filepath.Walk. +func (fs unprivFsEval) Walk(root string, fn filepath.WalkFunc) error { + return unpriv.Walk(root, fn) +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/hardening/verified_reader.go b/vendor/github.com/openSUSE/umoci/pkg/hardening/verified_reader.go new file mode 100644 index 0000000000..7474358944 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/hardening/verified_reader.go @@ -0,0 +1,176 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2018 SUSE LLC. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hardening + +import ( + "io" + + "github.com/apex/log" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Exported errors for verification issues that occur during processing within +// VerifiedReadCloser. Note that you will need to use +// "github.com/pkg/errors".Cause to get these exported errors in most cases. +var ( + ErrDigestMismatch = errors.Errorf("verified reader digest mismatch") + ErrSizeMismatch = errors.Errorf("verified reader size mismatch") +) + +// VerifiedReadCloser is a basic io.ReadCloser which allows for simple +// verification that a stream matches an expected hash. The entire stream is +// hashed while being passed through this reader, and on EOF it will verify +// that the hash matches the expected hash. If not, an error is returned. Note +// that this means you need to read all input to EOF in order to find +// verification errors. +// +// If Reader is a VerifiedReadCloser (with the same ExpectedDigest), all of the +// methods are just piped to the underlying methods (with no verification in +// the upper layer). +type VerifiedReadCloser struct { + // Reader is the underlying reader. + Reader io.ReadCloser + + // ExpectedDigest is the expected digest. When the underlying reader + // returns an EOF, the entire stream's sum will be compared to this hash + // and an error will be returned if they don't match. 
+ ExpectedDigest digest.Digest + + // ExpectedSize is the expected amount of data to be read overall. If the + // underlying reader hasn't returned an EOF by the time this value is + // exceeded, an error is returned and no further reads will occur. + ExpectedSize int64 + + // digester stores the current state of the stream's hash. + digester digest.Digester + + // currentSize is the number of bytes that have been read so far. + currentSize int64 +} + +func (v *VerifiedReadCloser) init() { + // Define digester if not already set. + if v.digester == nil { + alg := v.ExpectedDigest.Algorithm() + if !alg.Available() { + log.Fatalf("verified reader: unsupported hash algorithm %s", alg) + panic("verified reader: unreachable section") // should never be hit + } + v.digester = alg.Digester() + } +} + +func (v *VerifiedReadCloser) isNoop() bool { + innerV, ok := v.Reader.(*VerifiedReadCloser) + return ok && + innerV.ExpectedDigest == v.ExpectedDigest && + innerV.ExpectedSize == v.ExpectedSize +} + +func (v *VerifiedReadCloser) verify(nilErr error) error { + // Digest mismatch (always takes precedence)? + if actualDigest := v.digester.Digest(); actualDigest != v.ExpectedDigest { + return errors.Wrapf(ErrDigestMismatch, "expected %s not %s", v.ExpectedDigest, actualDigest) + } + // Do we need to check the size for mismatches? + if v.ExpectedSize >= 0 { + switch { + // Not enough bytes in the stream. + case v.currentSize < v.ExpectedSize: + return errors.Wrapf(ErrSizeMismatch, "expected %d bytes (only %d bytes in stream)", v.ExpectedSize, v.currentSize) + + // We don't read the entire blob, so the message needs to be slightly adjusted. + case v.currentSize > v.ExpectedSize: + return errors.Wrapf(ErrSizeMismatch, "expected %d bytes (extra bytes in stream)", v.ExpectedSize) + + } + } + // Forward the provided error. + return nilErr +} + +// Read is a wrapper around VerifiedReadCloser.Reader, with a digest check on +// EOF. 
// Piped to the underlying read.
+ if v.isNoop() { + return err + } + // Make sure we're ready. + v.init() + // Verify the state. + return v.verify(nil) +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/idtools/idtools.go b/vendor/github.com/openSUSE/umoci/pkg/idtools/idtools.go new file mode 100644 index 0000000000..6ea5a206dc --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/idtools/idtools.go @@ -0,0 +1,98 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package idtools + +import ( + "strconv" + "strings" + + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// ToHost translates a remapped container ID to an unmapped host ID using the +// provided ID mapping. If no mapping is provided, then the mapping is a no-op. +// If there is no mapping for the given ID an error is returned. +func ToHost(contID int, idMap []rspec.LinuxIDMapping) (int, error) { + if idMap == nil { + return contID, nil + } + + for _, m := range idMap { + if uint32(contID) >= m.ContainerID && uint32(contID) < m.ContainerID+m.Size { + return int(m.HostID + (uint32(contID) - m.ContainerID)), nil + } + } + + return -1, errors.Errorf("container id %d cannot be mapped to a host id", contID) +} + +// ToContainer takes an unmapped host ID and translates it to a remapped +// container ID using the provided ID mapping. 
If no mapping is provided, then +// the mapping is a no-op. If there is no mapping for the given ID an error is +// returned. +func ToContainer(hostID int, idMap []rspec.LinuxIDMapping) (int, error) { + if idMap == nil { + return hostID, nil + } + + for _, m := range idMap { + if uint32(hostID) >= m.HostID && uint32(hostID) < m.HostID+m.Size { + return int(m.ContainerID + (uint32(hostID) - m.HostID)), nil + } + } + + return -1, errors.Errorf("host id %d cannot be mapped to a container id", hostID) +} + +// ParseMapping takes a mapping string of the form "container:host[:size]" and +// returns the corresponding rspec.LinuxIDMapping. An error is returned if not +// enough fields are provided or are otherwise invalid. The default size is 1. +func ParseMapping(spec string) (rspec.LinuxIDMapping, error) { + parts := strings.Split(spec, ":") + + var err error + var hostID, contID, size int + switch len(parts) { + case 3: + size, err = strconv.Atoi(parts[2]) + if err != nil { + return rspec.LinuxIDMapping{}, errors.Wrap(err, "invalid size in mapping") + } + case 2: + size = 1 + default: + return rspec.LinuxIDMapping{}, errors.Errorf("invalid number of fields in mapping '%s': %d", spec, len(parts)) + } + + contID, err = strconv.Atoi(parts[0]) + if err != nil { + return rspec.LinuxIDMapping{}, errors.Wrap(err, "invalid containerID in mapping") + } + + hostID, err = strconv.Atoi(parts[1]) + if err != nil { + return rspec.LinuxIDMapping{}, errors.Wrap(err, "invalid hostID in mapping") + } + + return rspec.LinuxIDMapping{ + HostID: uint32(hostID), + ContainerID: uint32(contID), + Size: uint32(size), + }, nil +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/system/mknod_linux.go b/vendor/github.com/openSUSE/umoci/pkg/system/mknod_linux.go new file mode 100644 index 0000000000..4d80e5c6fc --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/system/mknod_linux.go @@ -0,0 +1,43 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE 
LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package system + +import ( + "archive/tar" + + "golang.org/x/sys/unix" +) + +// Tarmode takes a Typeflag (from a tar.Header for example) and returns the +// corresponding os.Filemode bit. Unknown typeflags are treated like regular +// files. +func Tarmode(typeflag byte) uint32 { + switch typeflag { + case tar.TypeSymlink: + return unix.S_IFLNK + case tar.TypeChar: + return unix.S_IFCHR + case tar.TypeBlock: + return unix.S_IFBLK + case tar.TypeFifo: + return unix.S_IFIFO + case tar.TypeDir: + return unix.S_IFDIR + } + return 0 +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/system/utime_linux.go b/vendor/github.com/openSUSE/umoci/pkg/system/utime_linux.go new file mode 100644 index 0000000000..592b82cc3f --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/system/utime_linux.go @@ -0,0 +1,41 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package system + +import ( + "os" + "time" + + "golang.org/x/sys/unix" +) + +// Lutimes is a wrapper around utimensat(2), with the AT_SYMLINK_NOFOLLOW flag +// set, to allow changing the time of a symlink rather than the file it points +// to. +func Lutimes(path string, atime, mtime time.Time) error { + times := []unix.Timespec{ + unix.NsecToTimespec(atime.UnixNano()), + unix.NsecToTimespec(mtime.UnixNano()), + } + + err := unix.UtimesNanoAt(unix.AT_FDCWD, path, times, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + return &os.PathError{Op: "lutimes", Path: path, Err: err} + } + return nil +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/system/xattr_linux.go b/vendor/github.com/openSUSE/umoci/pkg/system/xattr_linux.go new file mode 100644 index 0000000000..8ee5fb132f --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/system/xattr_linux.go @@ -0,0 +1,120 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package system + +import ( + "bytes" + "os" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// Llistxattr is a wrapper around unix.Llistattr, to abstract the NUL-splitting +// and resizing of the returned []string. +func Llistxattr(path string) ([]string, error) { + var buffer []byte + for { + // Find the size. 
+ sz, err := unix.Llistxattr(path, nil) + if err != nil { + // Could not get the size. + return nil, err + } + buffer = make([]byte, sz) + + // Get the buffer. + _, err = unix.Llistxattr(path, buffer) + if err != nil { + // If we got an ERANGE then we have to resize the buffer because + // someone raced with us getting the list. Don't you just love C + // interfaces. + if err == unix.ERANGE { + continue + } + return nil, err + } + + break + } + + // Split the buffer. + var xattrs []string + for _, name := range bytes.Split(buffer, []byte{'\x00'}) { + // "" is not a valid xattr (weirdly you get ERANGE -- not EINVAL -- if + // you try to touch it). So just skip it. + if len(name) == 0 { + continue + } + xattrs = append(xattrs, string(name)) + } + return xattrs, nil +} + +// Lgetxattr is a wrapper around unix.Lgetattr, to abstract the resizing of the +// returned []string. +func Lgetxattr(path string, name string) ([]byte, error) { + var buffer []byte + for { + // Find the size. + sz, err := unix.Lgetxattr(path, name, nil) + if err != nil { + // Could not get the size. + return nil, err + } + buffer = make([]byte, sz) + + // Get the buffer. + _, err = unix.Lgetxattr(path, name, buffer) + if err != nil { + // If we got an ERANGE then we have to resize the buffer because + // someone raced with us getting the list. Don't you just love C + // interfaces. + if err == unix.ERANGE { + continue + } + return nil, err + } + + break + } + return buffer, nil +} + +// Lclearxattrs is a wrapper around Llistxattr and Lremovexattr, which attempts +// to remove all xattrs from a given file. 
+func Lclearxattrs(path string, except map[string]struct{}) error { + names, err := Llistxattr(path) + if err != nil { + return errors.Wrap(err, "lclearxattrs: get list") + } + for _, name := range names { + if _, skip := except[name]; skip { + continue + } + if err := unix.Lremovexattr(path, name); err != nil { + // Ignore permission errors, because hitting a permission error + // means that it's a security.* xattr label or something similar. + if os.IsPermission(errors.Cause(err)) { + continue + } + return errors.Wrap(err, "lclearxattrs: remove xattr") + } + } + return nil +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/testutils/testutils.go b/vendor/github.com/openSUSE/umoci/pkg/testutils/testutils.go new file mode 100644 index 0000000000..79782609b7 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/testutils/testutils.go @@ -0,0 +1,40 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package testutils + +// binaryType is set during umoci.cover building. +var binaryType = releaseBinary + +// IsTestBinary returns whether the current binary is a test binary. This is +// only ever meant to be used so that test-specific initialisations can be done +// inside packages. Don't use it for anything else. 
+func IsTestBinary() bool { + return binaryType == testBinary +} + +const ( + testBinary = "test" + releaseBinary = "release" +) + +// Sanity check. +func init() { + if binaryType != releaseBinary && binaryType != testBinary { + panic("BinaryType is not release or test.") + } +} diff --git a/vendor/github.com/openSUSE/umoci/pkg/unpriv/unpriv.go b/vendor/github.com/openSUSE/umoci/pkg/unpriv/unpriv.go new file mode 100644 index 0000000000..23fce1ae7b --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/pkg/unpriv/unpriv.go @@ -0,0 +1,588 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package unpriv + +import ( + "archive/tar" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/cyphar/filepath-securejoin" + "github.com/openSUSE/umoci/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// fiRestore restores the state given by an os.FileInfo instance at the given +// path by ensuring that an Lstat(path) will return as-close-to the same +// os.FileInfo. +// +// #nosec G104 +func fiRestore(path string, fi os.FileInfo) { + // archive/tar handles the OS-specific syscall stuff required to get atime + // and mtime information for a file. + hdr, _ := tar.FileInfoHeader(fi, "") + + // Apply the relevant information from the FileInfo. 
+ // XXX: Should we return errors here to ensure that everything is + // deterministic or we fail? + os.Chmod(path, fi.Mode()) + os.Chtimes(path, hdr.AccessTime, hdr.ModTime) +} + +// splitpath splits the given path into each of the path components. +func splitpath(path string) []string { + path = filepath.Clean(path) + parts := strings.Split(path, string(os.PathSeparator)) + if filepath.IsAbs(path) { + parts = append([]string{string(os.PathSeparator)}, parts...) + } + return parts +} + +// WrapFunc is a function that can be passed to Wrap. It takes a path (and +// presumably operates on it -- since Wrap only ensures that the path given is +// resolvable) and returns some form of error. +type WrapFunc func(path string) error + +// Wrap will wrap a given function, and call it in a context where all of the +// parent directories in the given path argument are such that the path can be +// resolved (you may need to make your own changes to the path to make it +// readable). Note that the provided function may be called several times, and +// if the error returned is such that !os.IsPermission(err), then no trickery +// will be performed. If fn returns an error, so will this function. All of the +// trickery is reverted when this function returns (which is when fn returns). +func Wrap(path string, fn WrapFunc) error { + // FIXME: Should we be calling fn() here first? + if err := fn(path); err == nil || !os.IsPermission(errors.Cause(err)) { + return err + } + + // We need to chown all of the path components we don't have execute rights + // to. Specifically these are the path components which are parents of path + // components we cannot stat. However, we must make sure to not touch the + // path itself. + parts := splitpath(filepath.Dir(path)) + start := len(parts) + for { + current := filepath.Join(parts[:start]...) + _, err := os.Lstat(current) + if err == nil { + // We've hit the first element we can chown. 
+ break + } + if !os.IsPermission(err) { + // This is a legitimate error. + return errors.Wrapf(err, "unpriv.wrap: lstat parent: %s", current) + } + start-- + } + // Chown from the top down. + for i := start; i <= len(parts); i++ { + current := filepath.Join(parts[:i]...) + fi, err := os.Lstat(current) + if err != nil { + return errors.Wrapf(err, "unpriv.wrap: lstat parent: %s", current) + } + // Add +rwx permissions to directories. If we have the access to change + // the mode at all then we are the user owner (not just a group owner). + if err := os.Chmod(current, fi.Mode()|0700); err != nil { + return errors.Wrapf(err, "unpriv.wrap: chmod parent: %s", current) + } + defer fiRestore(current, fi) + } + + // Everything is wrapped. Return from this nightmare. + return fn(path) +} + +// Open is a wrapper around os.Open which has been wrapped with unpriv.Wrap to +// make it possible to open paths even if you do not currently have read +// permission. Note that the returned file handle references a path that you do +// not have read access to (since all changes are reverted when this function +// returns), so attempts to do Readdir() or similar functions that require +// doing lstat(2) may fail. +func Open(path string) (*os.File, error) { + var fh *os.File + err := Wrap(path, func(path string) error { + // Get information so we can revert it. + fi, err := os.Lstat(path) + if err != nil { + return errors.Wrap(err, "lstat file") + } + + // Add +r permissions to the file. + if err := os.Chmod(path, fi.Mode()|0400); err != nil { + return errors.Wrap(err, "chmod +r") + } + defer fiRestore(path, fi) + + // Open the damn thing. + fh, err = os.Open(path) + return err + }) + return fh, errors.Wrap(err, "unpriv.open") +} + +// Create is a wrapper around os.Create which has been wrapped with unpriv.Wrap +// to make it possible to create paths even if you do not currently have read +// permission. 
Note that the returned file handle references a path that you do +// not have read access to (since all changes are reverted when this function +// returns). +func Create(path string) (*os.File, error) { + var fh *os.File + err := Wrap(path, func(path string) error { + var err error + fh, err = os.Create(path) + return err + }) + return fh, errors.Wrap(err, "unpriv.create") +} + +// Readdir is a wrapper around (*os.File).Readdir which has been wrapper with +// unpriv.Wrap to make it possible to get []os.FileInfo for the set of children +// of the provided directory path. The interface for this is quite different to +// (*os.File).Readdir because we have to have a proper filesystem path in order +// to get the set of child FileInfos (because all of the child paths need to be +// resolveable). +func Readdir(path string) ([]os.FileInfo, error) { + var infos []os.FileInfo + err := Wrap(path, func(path string) error { + // Get information so we can revert it. + fi, err := os.Lstat(path) + if err != nil { + return errors.Wrap(err, "lstat dir") + } + + // Add +rx permissions to the file. + if err := os.Chmod(path, fi.Mode()|0500); err != nil { + return errors.Wrap(err, "chmod +rx") + } + defer fiRestore(path, fi) + + // Open the damn thing. + fh, err := os.Open(path) + if err != nil { + return errors.Wrap(err, "opendir") + } + defer fh.Close() + + // Get the set of dirents. + infos, err = fh.Readdir(-1) + return err + }) + return infos, errors.Wrap(err, "unpriv.readdir") +} + +// Lstat is a wrapper around os.Lstat which has been wrapped with unpriv.Wrap +// to make it possible to get os.FileInfo about a path even if you do not +// currently have the required mode bits set to resolve the path. Note that you +// may not have resolve access after this function returns because all of the +// trickery is reverted by unpriv.Wrap. +func Lstat(path string) (os.FileInfo, error) { + var fi os.FileInfo + err := Wrap(path, func(path string) error { + // Fairly simple. 
+// unpriv.Wrap to make it possible to get the linkname of a symlink even if you
+// do not currently have the required mode bits set to resolve the path. Note
+func Link(linkname, path string) error { + return errors.Wrap(Wrap(path, func(path string) error { + // We have to double-wrap this, because you need search access to the + // linkname. This is safe because any common ancestors will be reverted + // in reverse call stack order. + return errors.Wrap(Wrap(linkname, func(linkname string) error { + return os.Link(linkname, path) + }), "unpriv.wrap linkname") + }), "unpriv.link") +} + +// Chmod is a wrapper around os.Chmod which has been wrapped with unpriv.Wrap +// to make it possible to change the permission bits of a path even if you do +// not currently have the required access bits to access the path. +func Chmod(path string, mode os.FileMode) error { + return errors.Wrap(Wrap(path, func(path string) error { + return os.Chmod(path, mode) + }), "unpriv.chmod") +} + +// Lchown is a wrapper around os.Lchown which has been wrapped with unpriv.Wrap +// to make it possible to change the owner of a path even if you do not +// currently have the required access bits to access the path. Note that this +// function is not particularly useful in most rootless scenarios. +// +// FIXME: This probably should be removed because it's questionably useful. +func Lchown(path string, uid, gid int) error { + return errors.Wrap(Wrap(path, func(path string) error { + return os.Lchown(path, uid, gid) + }), "unpriv.lchown") +} + +// Chtimes is a wrapper around os.Chtimes which has been wrapped with +// unpriv.Wrap to make it possible to change the modified times of a path even +// if you do not currently have the required access bits to access the path. 
+// not including the path itself). If path is not a directory, then WrapFunc will
+// not be called and no error will be returned. This should be called within a
+// context where path has already been made resolveable. If WrapFunc returns an
+// error, the first error is returned and iteration is halted.
+ // #nosec G104 + _ = os.Chmod(path, fi.Mode()|0444) + names, err := fd.Readdirnames(-1) + fiRestore(path, fi) + if err != nil { + return errors.WithStack(err) + } + + // Make iteration order consistent. + sort.Strings(names) + + // Call on all the sub-directories. We run it in a Wrap context to ensure + // that the path we pass is resolveable when executed. + for _, name := range names { + subpath := filepath.Join(path, name) + if err := Wrap(subpath, wrapFn); err != nil { + return err + } + } + return nil +} + +// RemoveAll is similar to os.RemoveAll but with all of the internal functions +// wrapped with unpriv.Wrap to make it possible to remove a path (even if it +// has child paths) even if you do not currently have enough access bits. +func RemoveAll(path string) error { + return errors.Wrap(Wrap(path, func(path string) error { + // If remove works, we're done. + err := os.Remove(path) + if err == nil || os.IsNotExist(errors.Cause(err)) { + return nil + } + + // Is this a directory? + fi, serr := os.Lstat(path) + if serr != nil { + // Use securejoin's IsNotExist to handle ENOTDIR sanely. + if securejoin.IsNotExist(errors.Cause(serr)) { + serr = nil + } + return errors.Wrap(serr, "lstat") + } + // Return error from remove if it's not a directory. + if !fi.IsDir() { + return errors.Wrap(err, "remove non-directory") + } + err = nil + + err1 := foreachSubpath(path, func(subpath string) error { + err2 := RemoveAll(subpath) + if err == nil { + err = err2 + } + return nil + }) + if err1 != nil { + // We must have hit a race, but we don't care. + if os.IsNotExist(errors.Cause(err1)) { + err1 = nil + } + return errors.Wrap(err1, "foreach subpath") + } + + // Remove the directory. This should now work. 
+ err1 = os.Remove(path) + if err1 == nil || os.IsNotExist(errors.Cause(err1)) { + return nil + } + if err == nil { + err = err1 + } + return errors.Wrap(err, "remove") + }), "unpriv.removeall") +} + +// Mkdir is a wrapper around os.Mkdir which has been wrapped with unpriv.Wrap +// to make it possible to remove a path even if you do not currently have the +// required access bits to modify or resolve the path. +func Mkdir(path string, perm os.FileMode) error { + return errors.Wrap(Wrap(path, func(path string) error { + return os.Mkdir(path, perm) + }), "unpriv.mkdir") +} + +// MkdirAll is similar to os.MkdirAll but in order to implement it properly all +// of the internal functions were wrapped with unpriv.Wrap to make it possible +// to create a path even if you do not currently have enough access bits. +func MkdirAll(path string, perm os.FileMode) error { + return errors.Wrap(Wrap(path, func(path string) error { + // Check whether the path already exists. + fi, err := os.Stat(path) + if err == nil { + if fi.IsDir() { + return nil + } + return &os.PathError{Op: "mkdir", Path: path, Err: unix.ENOTDIR} + } + + // Create parent. + parent := filepath.Dir(path) + if parent != "." && parent != "/" { + err = MkdirAll(parent, perm) + if err != nil { + return err + } + } + + // Parent exists, now we can create the path. + err = os.Mkdir(path, perm) + if err != nil { + // Handle "foo/.". + fi, err1 := os.Lstat(path) + if err1 == nil && fi.IsDir() { + return nil + } + return err + } + return nil + }), "unpriv.mkdirall") +} + +// Mknod is a wrapper around unix.Mknod which has been wrapped with unpriv.Wrap +// to make it possible to remove a path even if you do not currently have the +// required access bits to modify or resolve the path. 
+func Mknod(path string, mode os.FileMode, dev uint64) error { + return errors.Wrap(Wrap(path, func(path string) error { + return unix.Mknod(path, uint32(mode), int(dev)) + }), "unpriv.mknod") +} + +// Llistxattr is a wrapper around system.Llistxattr which has been wrapped with +// unpriv.Wrap to make it possible to remove a path even if you do not +// currently have the required access bits to resolve the path. +func Llistxattr(path string) ([]string, error) { + var xattrs []string + err := Wrap(path, func(path string) error { + var err error + xattrs, err = system.Llistxattr(path) + return err + }) + return xattrs, errors.Wrap(err, "unpriv.llistxattr") +} + +// Lremovexattr is a wrapper around system.Lremovexattr which has been wrapped +// with unpriv.Wrap to make it possible to remove a path even if you do not +// currently have the required access bits to resolve the path. +func Lremovexattr(path, name string) error { + return errors.Wrap(Wrap(path, func(path string) error { + return unix.Lremovexattr(path, name) + }), "unpriv.lremovexattr") +} + +// Lsetxattr is a wrapper around system.Lsetxattr which has been wrapped +// with unpriv.Wrap to make it possible to set a path even if you do not +// currently have the required access bits to resolve the path. +func Lsetxattr(path, name string, value []byte, flags int) error { + return errors.Wrap(Wrap(path, func(path string) error { + return unix.Lsetxattr(path, name, value, flags) + }), "unpriv.lsetxattr") +} + +// Lgetxattr is a wrapper around system.Lgetxattr which has been wrapped +// with unpriv.Wrap to make it possible to get a path even if you do not +// currently have the required access bits to resolve the path. 
+func Lgetxattr(path, name string) ([]byte, error) { + var value []byte + err := Wrap(path, func(path string) error { + var err error + value, err = system.Lgetxattr(path, name) + return err + }) + return value, errors.Wrap(err, "unpriv.lgetxattr") +} + +// Lclearxattrs is similar to system.Lclearxattrs but in order to implement it +// properly all of the internal functions were wrapped with unpriv.Wrap to make +// it possible to create a path even if you do not currently have enough access +// bits. +func Lclearxattrs(path string, except map[string]struct{}) error { + return errors.Wrap(Wrap(path, func(path string) error { + names, err := Llistxattr(path) + if err != nil { + return err + } + for _, name := range names { + if _, skip := except[name]; skip { + continue + } + if err := Lremovexattr(path, name); err != nil { + // SELinux won't let you change security.selinux (for obvious + // security reasons), so we don't clear xattrs if attempting to + // clear them causes an EPERM. This EPERM will not be due to + // resolution issues (Llistxattr already has done that for us). + if os.IsPermission(errors.Cause(err)) { + continue + } + return err + } + } + return nil + }), "unpriv.lclearxattrs") +} + +// walk is the inner implementation of Walk. +func walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // Always run walkFn first. If we're not a directory there's no children to + // iterate over and so we bail even if there wasn't an error. + err := walkFn(path, info, nil) + if !info.IsDir() || err != nil { + return err + } + + // Now just execute walkFn over each subpath. + return foreachSubpath(path, func(subpath string) error { + info, err := Lstat(subpath) + if err != nil { + // If it doesn't exist, just pass it directly to walkFn. + if err := walkFn(subpath, info, err); err != nil { + // Ignore SkipDir. 
+ if errors.Cause(err) != filepath.SkipDir { + return err + } + } + } else { + if err := walk(subpath, info, walkFn); err != nil { + // Ignore error if it's SkipDir and subpath is a directory. + if !(info.IsDir() && errors.Cause(err) == filepath.SkipDir) { + return err + } + } + } + return nil + }) +} + +// Walk is a reimplementation of filepath.Walk, wrapping all of the relevant +// function calls with Wrap, allowing you to walk over a tree even in the face +// of multiple nested cases where paths are not normally accessible. The +// os.FileInfo passed to walkFn is the "pristine" version (as opposed to the +// currently-on-disk version that may have been temporarily modified by Wrap). +func Walk(root string, walkFn filepath.WalkFunc) error { + return Wrap(root, func(root string) error { + info, err := Lstat(root) + if err != nil { + err = walkFn(root, nil, err) + } else { + err = walk(root, info, walkFn) + } + if errors.Cause(err) == filepath.SkipDir { + err = nil + } + return errors.Wrap(err, "unpriv.walk") + }) +} diff --git a/vendor/github.com/opencontainers/image-tools/LICENSE b/vendor/github.com/openSUSE/umoci/third_party/shared/COPYING similarity index 99% rename from vendor/github.com/opencontainers/image-tools/LICENSE rename to vendor/github.com/openSUSE/umoci/third_party/shared/COPYING index 8dada3edaf..d645695673 100644 --- a/vendor/github.com/opencontainers/image-tools/LICENSE +++ b/vendor/github.com/openSUSE/umoci/third_party/shared/COPYING @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +179,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. 
We also recommend that a @@ -186,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/openSUSE/umoci/third_party/shared/util.go b/vendor/github.com/openSUSE/umoci/third_party/shared/util.go new file mode 100644 index 0000000000..fca447df7a --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/third_party/shared/util.go @@ -0,0 +1,55 @@ +/* + * lxd: daemon based on liblxd with a REST API + * Copyright (C) 2015-2017 LXD Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This code was copied from https://github.com/lxc/lxd, which is available +// under the the Apache 2.0 license (as noted above). The version of this code +// comes from the tag lxd-2.21 at /shared/util.go. + +package shared + +import ( + "bufio" + "fmt" + "os" +) + +// RunningInUserNS returns whether the current process is (likely) inside a +// user namespace. This has a possible false-negative (where it will return +// false while inside a user namespace if it was intentionally configured to be +// confusing to programs like this). 
+func RunningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + line := string(l) + var a, b, c int64 + // #nosec G104 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + if a == 0 && b == 0 && c == 4294967295 { + return false + } + return true +} diff --git a/vendor/github.com/openSUSE/umoci/third_party/user/LICENSE b/vendor/github.com/openSUSE/umoci/third_party/user/LICENSE new file mode 100644 index 0000000000..27448585ad --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/third_party/user/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/openSUSE/umoci/third_party/user/NOTICE b/vendor/github.com/openSUSE/umoci/third_party/user/NOTICE new file mode 100644 index 0000000000..c29775c0d9 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/third_party/user/NOTICE @@ -0,0 +1,17 @@ +runc + +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/openSUSE/umoci/third_party/user/README.md b/vendor/github.com/openSUSE/umoci/third_party/user/README.md new file mode 100644 index 0000000000..96bf31f86b --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/third_party/user/README.md @@ -0,0 +1,7 @@ +## `third_party/user` ## +![License: Apache 2.0](https://img.shields.io/github/license/opencontainers/runc.svg) + +This package is imported from +`github.com/opencontainers/runc/libcontainer/user`. Currently I can't import +runC directly because it involves importing `docker` and also causes version +conflicts. I'm not in the mood to fix that. diff --git a/vendor/github.com/openSUSE/umoci/third_party/user/lookup.go b/vendor/github.com/openSUSE/umoci/third_party/user/lookup.go new file mode 100644 index 0000000000..911d19047e --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/third_party/user/lookup.go @@ -0,0 +1,128 @@ +/* + * Imported from opencontainers/runc/libcontainer/user. + * Copyright (C) 2014 Docker, Inc. + * Copyright (C) The Linux Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package user + +import ( + "errors" + "syscall" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") + // No matching entries found in file. + ErrNoPasswdEntries = errors.New("no matching entries in passwd file") + ErrNoGroupEntries = errors.New("no matching entries in group file") +) + +func lookupUser(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, ErrNoPasswdEntries + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(syscall.Getuid()) +} + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. 
+func LookupUser(username string) (User, error) { + return lookupUser(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUser(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupGroup(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, ErrNoGroupEntries + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(syscall.Getgid()) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. 
+func LookupGid(gid int) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Gid == gid + }) +} diff --git a/vendor/github.com/openSUSE/umoci/third_party/user/lookup_unix.go b/vendor/github.com/openSUSE/umoci/third_party/user/lookup_unix.go new file mode 100644 index 0000000000..782c4b1845 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/third_party/user/lookup_unix.go @@ -0,0 +1,48 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +/* + * Imported from opencontainers/runc/libcontainer/user. + * Copyright (C) 2014 Docker, Inc. + * Copyright (C) The Linux Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package user + +import ( + "io" + "os" +) + +// Unix-specific path to the passwd and group formatted files. 
+const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} diff --git a/vendor/github.com/openSUSE/umoci/third_party/user/lookup_unsupported.go b/vendor/github.com/openSUSE/umoci/third_party/user/lookup_unsupported.go new file mode 100644 index 0000000000..005ee2d614 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/third_party/user/lookup_unsupported.go @@ -0,0 +1,39 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +/* + * Imported from opencontainers/runc/libcontainer/user. + * Copyright (C) 2014 Docker, Inc. + * Copyright (C) The Linux Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package user + +import "io" + +func GetPasswdPath() (string, error) { + return "", ErrUnsupported +} + +func GetPasswd() (io.ReadCloser, error) { + return nil, ErrUnsupported +} + +func GetGroupPath() (string, error) { + return "", ErrUnsupported +} + +func GetGroup() (io.ReadCloser, error) { + return nil, ErrUnsupported +} diff --git a/vendor/github.com/openSUSE/umoci/third_party/user/user.go b/vendor/github.com/openSUSE/umoci/third_party/user/user.go new file mode 100644 index 0000000000..fc61ac5742 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/third_party/user/user.go @@ -0,0 +1,460 @@ +/* + * Imported from opencontainers/runc/libcontainer/user. + * Copyright (C) 2014 Docker, Inc. + * Copyright (C) The Linux Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + // Ignore cases where we don't have enough fields to populate the arguments. 
+ // Some configuration files like to misbehave. + if len(v) <= i { + break + } + + // Use the type of the argument to figure out how to parse it, scanf() style. + // This is legit. + switch e := v[i].(type) { + case *string: + *e = p + case *int: + // "numbers", with conversion errors ignored because of some misbehaving configuration files. + // #nosec G104 + *e, _ = strconv.Atoi(p) + case *[]string: + // Comma-separated lists. + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // Someone goof'd when writing code using this function. Scream so they can hear us. + panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e)) + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func 
ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +type ExecUser struct { + Uid int + Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. 
+func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + passwd, err := os.Open(passwdPath) + if err != nil { + passwd = nil + } else { + defer passwd.Close() + } + + group, err := os.Open(groupPath) + if err != nil { + group = nil + } else { + defer group.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid +// * "user:gid" +// * "uid:group" +// +// It should be noted that if you specify a numeric user or group id, they will +// not be evaluated as usernames (only the metadata will be filled). So attempting +// to parse a user with user.Name = "1337" will produce the user with a UID of +// 1337. +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax + var userArg, groupArg string + parseLine(userSpec, &userArg, &groupArg) + + // Convert userArg and groupArg to be numeric, so we don't have to execute + // Atoi *twice* for each iteration over lines. + uidArg, uidErr := strconv.Atoi(userArg) + gidArg, gidErr := strconv.Atoi(groupArg) + + // Find the matching user. 
+ users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + // Default to current state of the user. + return u.Uid == user.Uid + } + + if uidErr == nil { + // If the userArg is numeric, always treat it as a UID. + return uidArg == u.Uid + } + + return u.Name == userArg + }) + + // If we can't find the user, we have to bail. + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("unable to find user %s: %v", userArg, err) + } + + var matchedUserName string + if len(users) > 0 { + // First match wins, even if there's more than one matching entry. + matchedUserName = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // If we can't find a user with the given username, the only other valid + // option is if it's a numeric username with no associated entry in passwd. + + if uidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries) + } + user.Uid = uidArg + + // Must be inside valid uid range. + if user.Uid < minId || user.Uid > maxId { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + + // On to the groups. If we matched a username, we need to do this because of + // the supplementary group IDs. + if groupArg != "" || matchedUserName != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // If the group argument isn't explicit, we'll just search for it. + if groupArg == "" { + // Check if user is a member of this group. + for _, u := range g.List { + if u == matchedUserName { + return true + } + } + return false + } + + if gidErr == nil { + // If the groupArg is numeric, always treat it as a GID. 
+ return gidArg == g.Gid + } + + return g.Name == groupArg + }) + if err != nil && group != nil { + return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err) + } + + // Only start modifying user.Gid if it is in explicit form. + if groupArg != "" { + if len(groups) > 0 { + // First match wins, even if there's more than one matching entry. + user.Gid = groups[0].Gid + } else if groupArg != "" { + // If we can't find a group with the given name, the only other valid + // option is if it's a numeric group name with no associated entry in group. + + if gidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries) + } + user.Gid = gidArg + + // Must be inside valid gid range. + if user.Gid < minId || user.Gid > maxId { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + } else if len(groups) > 0 && uidErr != nil { + // Supplementary group ids only make sense if in the implicit form. + user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} + +// GetAdditionalGroups looks up a list of groups by name or group id +// against the given /etc/group formatted data. If a group name cannot +// be found, an error will be returned. If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. 
+func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + var groups = []Group{} + if group != nil { + var err error + groups, err = ParseGroupFilter(group, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) + } + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.Atoi(ag) + if err != nil { + return nil, fmt.Errorf("Unable to find group %s", ag) + } + // Ensure gid is inside gid range. + if gid < minId || gid > maxId { + return nil, ErrRange + } + gidMap[gid] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups. +func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + group, err := os.Open(groupPath) + if err == nil { + defer group.Close() + } + return GetAdditionalGroups(additionalGroups, group) +} diff --git a/vendor/github.com/openSUSE/umoci/utils.go b/vendor/github.com/openSUSE/umoci/utils.go new file mode 100644 index 0000000000..cb5556c1fc --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/utils.go @@ -0,0 +1,325 @@ +/* + * umoci: Umoci Modifies Open Containers' Images + * Copyright (C) 2016, 2017, 2018 SUSE LLC. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package umoci + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "text/tabwriter" + + "github.com/apex/log" + "github.com/docker/go-units" + "github.com/openSUSE/umoci/oci/casext" + igen "github.com/openSUSE/umoci/oci/config/generate" + "github.com/openSUSE/umoci/oci/layer" + "github.com/openSUSE/umoci/pkg/idtools" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/urfave/cli" + "github.com/vbatts/go-mtree" + "golang.org/x/net/context" +) + +// FIXME: This should be moved to a library. Too much of this code is in the +// cmd/... code, but should really be refactored to the point where it +// can be useful to other people. This is _particularly_ true for the +// code which repacks images (the changes to the config, manifest and +// CAS should be made into a library). + +// MtreeKeywords is the set of keywords used by umoci for verification and diff +// generation of a bundle. This is based on mtree.DefaultKeywords, but is +// hardcoded here to ensure that vendor changes don't mess things up. +var MtreeKeywords = []mtree.Keyword{ + "size", + "type", + "uid", + "gid", + "mode", + "link", + "nlink", + "tar_time", + "sha256digest", + "xattr", +} + +// MetaName is the name of umoci's metadata file that is stored in all +// bundles extracted by umoci. 
+const MetaName = "umoci.json" + +// MetaVersion is the version of Meta supported by this code. The +// value is only bumped for updates which are not backwards compatible. +const MetaVersion = "2" + +// Meta represents metadata about how umoci unpacked an image to a bundle +// and other similar information. It is used to keep track of information that +// is required when repacking an image and other similar bundle information. +type Meta struct { + // Version is the version of umoci used to unpack the bundle. This is used + // to future-proof the umoci.json information. + Version string `json:"umoci_version"` + + // From is a copy of the descriptor pointing to the image manifest that was + // used to unpack the bundle. Essentially it's a resolved form of the + // --image argument to umoci-unpack(1). + From casext.DescriptorPath `json:"from_descriptor_path"` + + // MapOptions is the parsed version of --uid-map, --gid-map and --rootless + // arguments to umoci-unpack(1). While all of these options technically do + // not need to be the same for corresponding umoci-unpack(1) and + // umoci-repack(1) calls, changing them is not recommended and so the + // default should be that they are the same. + MapOptions layer.MapOptions `json:"map_options"` +} + +// WriteTo writes a JSON-serialised version of Meta to the given io.Writer. +func (m Meta) WriteTo(w io.Writer) (int64, error) { + buf := new(bytes.Buffer) + err := json.NewEncoder(io.MultiWriter(buf, w)).Encode(m) + return int64(buf.Len()), err +} + +// WriteBundleMeta writes an umoci.json file to the given bundle path. +func WriteBundleMeta(bundle string, meta Meta) error { + fh, err := os.Create(filepath.Join(bundle, MetaName)) + if err != nil { + return errors.Wrap(err, "create metadata") + } + defer fh.Close() + + _, err = meta.WriteTo(fh) + return errors.Wrap(err, "write metadata") +} + +// ReadBundleMeta reads and parses the umoci.json file from a given bundle path. 
+func ReadBundleMeta(bundle string) (Meta, error) { + var meta Meta + + fh, err := os.Open(filepath.Join(bundle, MetaName)) + if err != nil { + return meta, errors.Wrap(err, "open metadata") + } + defer fh.Close() + + err = json.NewDecoder(fh).Decode(&meta) + if meta.Version != MetaVersion { + if err == nil { + err = fmt.Errorf("unsupported umoci.json version: %s", meta.Version) + } + } + return meta, errors.Wrap(err, "decode metadata") +} + +// ManifestStat has information about a given OCI manifest. +// TODO: Implement support for manifest lists, this should also be able to +// contain stat information for a list of manifests. +type ManifestStat struct { + // TODO: Flesh this out. Currently it's only really being used to get an + // equivalent of docker-history(1). We really need to add more + // information about it. + + // History stores the history information for the manifest. + History []historyStat `json:"history"` +} + +// Format formats a ManifestStat using the default formatting, and writes the +// result to the given writer. +// TODO: This should really be implemented in a way that allows for users to +// define their own custom templates for different blocks (meaning that +// this should use text/template rather than using tabwriters manually. +func (ms ManifestStat) Format(w io.Writer) error { + // Output history information. + tw := tabwriter.NewWriter(w, 4, 2, 1, ' ', 0) + fmt.Fprintf(tw, "LAYER\tCREATED\tCREATED BY\tSIZE\tCOMMENT\n") + for _, histEntry := range ms.History { + var ( + created = strings.Replace(histEntry.Created.Format(igen.ISO8601), "\t", " ", -1) + createdBy = strings.Replace(histEntry.CreatedBy, "\t", " ", -1) + comment = strings.Replace(histEntry.Comment, "\t", " ", -1) + layerID = "" + size = "" + ) + + if !histEntry.EmptyLayer { + layerID = histEntry.Layer.Digest.String() + size = units.HumanSize(float64(histEntry.Layer.Size)) + } + + // TODO: We need to truncate some of the fields. 
+ fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", layerID, created, createdBy, size, comment) + } + return tw.Flush() +} + +// historyStat contains information about a single entry in the history of a +// manifest. This is essentially equivalent to a single record from +// docker-history(1). +type historyStat struct { + // Layer is the descriptor referencing where the layer is stored. If it is + // nil, then this entry is an empty_layer (and thus doesn't have a backing + // diff layer). + Layer *ispec.Descriptor `json:"layer"` + + // DiffID is an additional piece of information to Layer. It stores the + // DiffID of the given layer corresponding to the history entry. If DiffID + // is "", then this entry is an empty_layer. + DiffID string `json:"diff_id"` + + // History is embedded in the stat information. + ispec.History +} + +// Stat computes the ManifestStat for a given manifest blob. The provided +// descriptor must refer to an OCI Manifest. +func Stat(ctx context.Context, engine casext.Engine, manifestDescriptor ispec.Descriptor) (ManifestStat, error) { + var stat ManifestStat + + if manifestDescriptor.MediaType != ispec.MediaTypeImageManifest { + return stat, errors.Errorf("stat: cannot stat a non-manifest descriptor: invalid media type '%s'", manifestDescriptor.MediaType) + } + + // We have to get the actual manifest. + manifestBlob, err := engine.FromDescriptor(ctx, manifestDescriptor) + if err != nil { + return stat, err + } + manifest, ok := manifestBlob.Data.(ispec.Manifest) + if !ok { + // Should _never_ be reached. + return stat, errors.Errorf("[internal error] unknown manifest blob type: %s", manifestBlob.Descriptor.MediaType) + } + + // Now get the config. + configBlob, err := engine.FromDescriptor(ctx, manifest.Config) + if err != nil { + return stat, errors.Wrap(err, "stat") + } + config, ok := configBlob.Data.(ispec.Image) + if !ok { + // Should _never_ be reached. 
+ return stat, errors.Errorf("[internal error] unknown config blob type: %s", configBlob.Descriptor.MediaType) + } + + // TODO: This should probably be moved into separate functions. + + // Generate the history of the image. Because the config.History entries + // are in the same order as the manifest.Layer entries this is fairly + // simple. However, we only increment the layer index if a layer was + // actually generated by a history entry. + layerIdx := 0 + for _, histEntry := range config.History { + info := historyStat{ + History: histEntry, + DiffID: "", + Layer: nil, + } + + // Only fill the other information and increment layerIdx if it's a + // non-empty layer. + if !histEntry.EmptyLayer { + info.DiffID = config.RootFS.DiffIDs[layerIdx].String() + info.Layer = &manifest.Layers[layerIdx] + layerIdx++ + } + + stat.History = append(stat.History, info) + } + + return stat, nil +} + +// GenerateBundleManifest creates and writes an mtree of the rootfs in the given +// bundle path, using the supplied fsEval method +func GenerateBundleManifest(mtreeName string, bundlePath string, fsEval mtree.FsEval) error { + mtreePath := filepath.Join(bundlePath, mtreeName+".mtree") + fullRootfsPath := filepath.Join(bundlePath, layer.RootfsName) + + log.WithFields(log.Fields{ + "keywords": MtreeKeywords, + "mtree": mtreePath, + }).Debugf("umoci: generating mtree manifest") + + log.Info("computing filesystem manifest ...") + dh, err := mtree.Walk(fullRootfsPath, nil, MtreeKeywords, fsEval) + if err != nil { + return errors.Wrap(err, "generate mtree spec") + } + log.Info("... 
done") + + flags := os.O_CREATE | os.O_WRONLY | os.O_EXCL + fh, err := os.OpenFile(mtreePath, flags, 0644) + if err != nil { + return errors.Wrap(err, "open mtree") + } + defer fh.Close() + + log.Debugf("umoci: saving mtree manifest") + + if _, err := dh.WriteTo(fh); err != nil { + return errors.Wrap(err, "write mtree") + } + + return nil +} + +// ParseIdmapOptions sets up the mapping options for Meta, using +// the arguments specified on the command line +func ParseIdmapOptions(meta *Meta, ctx *cli.Context) error { + // We need to set mappings if we're in rootless mode. + meta.MapOptions.Rootless = ctx.Bool("rootless") + if meta.MapOptions.Rootless { + if !ctx.IsSet("uid-map") { + if err := ctx.Set("uid-map", fmt.Sprintf("0:%d:1", os.Geteuid())); err != nil { + // Should _never_ be reached. + return errors.Wrap(err, "[internal error] failure auto-setting rootless --uid-map") + } + } + if !ctx.IsSet("gid-map") { + if err := ctx.Set("gid-map", fmt.Sprintf("0:%d:1", os.Getegid())); err != nil { + // Should _never_ be reached. 
+ return errors.Wrap(err, "[internal error] failure auto-setting rootless --gid-map") + } + } + } + + for _, uidmap := range ctx.StringSlice("uid-map") { + idMap, err := idtools.ParseMapping(uidmap) + if err != nil { + return errors.Wrapf(err, "failure parsing --uid-map %s", uidmap) + } + meta.MapOptions.UIDMappings = append(meta.MapOptions.UIDMappings, idMap) + } + for _, gidmap := range ctx.StringSlice("gid-map") { + idMap, err := idtools.ParseMapping(gidmap) + if err != nil { + return errors.Wrapf(err, "failure parsing --gid-map %s", gidmap) + } + meta.MapOptions.GIDMappings = append(meta.MapOptions.GIDMappings, idMap) + } + + log.WithFields(log.Fields{ + "map.uid": meta.MapOptions.UIDMappings, + "map.gid": meta.MapOptions.GIDMappings, + }).Debugf("parsed mappings") + + return nil +} diff --git a/vendor/github.com/opencontainers/image-spec/schema/config-schema.json b/vendor/github.com/opencontainers/image-spec/schema/config-schema.json deleted file mode 100644 index 15bccd04e3..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/config-schema.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "description": "OpenContainer Config Specification", - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "https://opencontainers.org/schema/image/config", - "type": "object", - "properties": { - "created": { - "type": "string", - "format": "date-time" - }, - "author": { - "type": "string" - }, - "architecture": { - "type": "string" - }, - "os": { - "type": "string" - }, - "config": { - "type": "object", - "properties": { - "User": { - "type": "string" - }, - "ExposedPorts": { - "$ref": "defs.json#/definitions/mapStringObject" - }, - "Env": { - "type": "array", - "items": { - "type": "string" - } - }, - "Entrypoint": { - "oneOf": [ - { - "type": "array", - "items": { - "type": "string" - } - }, - { - "type": "null" - } - ] - }, - "Cmd": { - "oneOf": [ - { - "type": "array", - "items": { - "type": "string" - } - }, - { - "type": "null" - } - ] - }, - 
"Volumes": { - "oneOf": [ - { - "$ref": "defs.json#/definitions/mapStringObject" - }, - { - "type": "null" - } - ] - }, - "WorkingDir": { - "type": "string" - }, - "Labels": { - "oneOf": [ - { - "$ref": "defs.json#/definitions/mapStringString" - }, - { - "type": "null" - } - ] - }, - "StopSignal": { - "type": "string" - } - } - }, - "rootfs": { - "type": "object", - "properties": { - "diff_ids": { - "type": "array", - "items": { - "type": "string" - } - }, - "type": { - "type": "string", - "enum": [ - "layers" - ] - } - }, - "required": [ - "diff_ids", - "type" - ] - }, - "history": { - "type": "array", - "items": { - "type": "object", - "properties": { - "created": { - "type": "string", - "format": "date-time" - }, - "author": { - "type": "string" - }, - "created_by": { - "type": "string" - }, - "comment": { - "type": "string" - }, - "empty_layer": { - "type": "boolean" - } - } - } - } - }, - "required": [ - "architecture", - "os", - "rootfs" - ] -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/content-descriptor.json b/vendor/github.com/opencontainers/image-spec/schema/content-descriptor.json deleted file mode 100644 index 69fcea92ec..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/content-descriptor.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "description": "OpenContainer Content Descriptor Specification", - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "https://opencontainers.org/schema/descriptor", - "type": "object", - "properties": { - "mediaType": { - "description": "the mediatype of the referenced object", - "$ref": "defs-descriptor.json#/definitions/mediaType" - }, - "size": { - "description": "the size in bytes of the referenced object", - "$ref": "defs.json#/definitions/int64" - }, - "digest": { - "description": "the cryptographic checksum digest of the object, in the pattern ':'", - "$ref": "defs-descriptor.json#/definitions/digest" - }, - "urls": { - "description": "a list of urls from which this 
object may be downloaded", - "$ref": "defs-descriptor.json#/definitions/urls" - }, - "annotations": { - "id": "https://opencontainers.org/schema/image/descriptor/annotations", - "$ref": "defs-descriptor.json#/definitions/annotations" - } - }, - "required": [ - "mediaType", - "size", - "digest" - ] -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/defs-descriptor.json b/vendor/github.com/opencontainers/image-spec/schema/defs-descriptor.json deleted file mode 100644 index feaea001bb..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/defs-descriptor.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "description": "Definitions particular to OpenContainer Descriptor Specification", - "definitions": { - "mediaType": { - "id": "https://opencontainers.org/schema/image/descriptor/mediaType", - "type": "string", - "pattern": "^[A-Za-z0-9][A-Za-z0-9!#$&-^_.+]{0,126}/[A-Za-z0-9][A-Za-z0-9!#$&-^_.+]{0,126}$" - }, - "digest": { - "description": "the cryptographic checksum digest of the object, in the pattern ':'", - "type": "string", - "pattern": "^[a-z0-9]+(?:[+._-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$" - }, - "urls": { - "description": "a list of urls from which this object may be downloaded", - "type": "array", - "items": { - "type": "string", - "format": "uri" - } - }, - "annotations": { - "id": "https://opencontainers.org/schema/image/descriptor/annotations", - "$ref": "defs.json#/definitions/mapStringString" - } - } -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/defs.json b/vendor/github.com/opencontainers/image-spec/schema/defs.json deleted file mode 100644 index 03cb495b86..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/defs.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "description": "Definitions used throughout the OpenContainer Specification", - "definitions": { - "int8": { - "type": "integer", - "minimum": -128, - "maximum": 127 - }, - "int16": { - "type": "integer", - "minimum": -32768, - "maximum": 32767 - }, - 
"int32": { - "type": "integer", - "minimum": -2147483648, - "maximum": 2147483647 - }, - "int64": { - "type": "integer", - "minimum": -9223372036854776000, - "maximum": 9223372036854776000 - }, - "uint8": { - "type": "integer", - "minimum": 0, - "maximum": 255 - }, - "uint16": { - "type": "integer", - "minimum": 0, - "maximum": 65535 - }, - "uint32": { - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "uint64": { - "type": "integer", - "minimum": 0, - "maximum": 18446744073709552000 - }, - "uint16Pointer": { - "oneOf": [ - { - "$ref": "#/definitions/uint16" - }, - { - "type": "null" - } - ] - }, - "uint64Pointer": { - "oneOf": [ - { - "$ref": "#/definitions/uint64" - }, - { - "type": "null" - } - ] - }, - "stringPointer": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "mapStringString": { - "type": "object", - "patternProperties": { - ".{1,}": { - "type": "string" - } - } - }, - "mapStringObject": { - "type": "object", - "patternProperties": { - ".{1,}": { - "type": "object" - } - } - } - } -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/doc.go b/vendor/github.com/opencontainers/image-spec/schema/doc.go deleted file mode 100644 index 5ea5914d05..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package schema defines the OCI image media types, schema definitions and validation functions. -package schema diff --git a/vendor/github.com/opencontainers/image-spec/schema/error.go b/vendor/github.com/opencontainers/image-spec/schema/error.go deleted file mode 100644 index 8b0bfc2afc..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/error.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "encoding/json" - "io" - - "go4.org/errorutil" -) - -// A SyntaxError is a description of a JSON syntax error -// including line, column and offset in the JSON file. -type SyntaxError struct { - msg string - Line, Col int - Offset int64 -} - -func (e *SyntaxError) Error() string { return e.msg } - -// WrapSyntaxError checks whether the given error is a *json.SyntaxError -// and converts it into a *schema.SyntaxError containing line/col information using the given reader. -// If the given error is not a *json.SyntaxError it is returned unchanged. 
-func WrapSyntaxError(r io.Reader, err error) error { - if serr, ok := err.(*json.SyntaxError); ok { - line, col, _ := errorutil.HighlightBytePosition(r, serr.Offset) - return &SyntaxError{serr.Error(), line, col, serr.Offset} - } - - return err -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/fs.go b/vendor/github.com/opencontainers/image-spec/schema/fs.go deleted file mode 100644 index b18f765684..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/fs.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by "esc -private -pkg=schema -include=.*\.json$ ."; DO NOT EDIT. 
- -package schema - -import ( - "bytes" - "compress/gzip" - "encoding/base64" - "io/ioutil" - "net/http" - "os" - "path" - "sync" - "time" -) - -type _escLocalFS struct{} - -var _escLocal _escLocalFS - -type _escStaticFS struct{} - -var _escStatic _escStaticFS - -type _escDirectory struct { - fs http.FileSystem - name string -} - -type _escFile struct { - compressed string - size int64 - modtime int64 - local string - isDir bool - - once sync.Once - data []byte - name string -} - -func (_escLocalFS) Open(name string) (http.File, error) { - f, present := _escData[path.Clean(name)] - if !present { - return nil, os.ErrNotExist - } - return os.Open(f.local) -} - -func (_escStaticFS) prepare(name string) (*_escFile, error) { - f, present := _escData[path.Clean(name)] - if !present { - return nil, os.ErrNotExist - } - var err error - f.once.Do(func() { - f.name = path.Base(name) - if f.size == 0 { - return - } - var gr *gzip.Reader - b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed)) - gr, err = gzip.NewReader(b64) - if err != nil { - return - } - f.data, err = ioutil.ReadAll(gr) - }) - if err != nil { - return nil, err - } - return f, nil -} - -func (fs _escStaticFS) Open(name string) (http.File, error) { - f, err := fs.prepare(name) - if err != nil { - return nil, err - } - return f.File() -} - -func (dir _escDirectory) Open(name string) (http.File, error) { - return dir.fs.Open(dir.name + name) -} - -func (f *_escFile) File() (http.File, error) { - type httpFile struct { - *bytes.Reader - *_escFile - } - return &httpFile{ - Reader: bytes.NewReader(f.data), - _escFile: f, - }, nil -} - -func (f *_escFile) Close() error { - return nil -} - -func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) { - return nil, nil -} - -func (f *_escFile) Stat() (os.FileInfo, error) { - return f, nil -} - -func (f *_escFile) Name() string { - return f.name -} - -func (f *_escFile) Size() int64 { - return f.size -} - -func (f *_escFile) Mode() 
os.FileMode { - return 0 -} - -func (f *_escFile) ModTime() time.Time { - return time.Unix(f.modtime, 0) -} - -func (f *_escFile) IsDir() bool { - return f.isDir -} - -func (f *_escFile) Sys() interface{} { - return f -} - -// _escFS returns a http.Filesystem for the embedded assets. If useLocal is true, -// the filesystem's contents are instead used. -func _escFS(useLocal bool) http.FileSystem { - if useLocal { - return _escLocal - } - return _escStatic -} - -// _escDir returns a http.Filesystem for the embedded assets on a given prefix dir. -// If useLocal is true, the filesystem's contents are instead used. -func _escDir(useLocal bool, name string) http.FileSystem { - if useLocal { - return _escDirectory{fs: _escLocal, name: name} - } - return _escDirectory{fs: _escStatic, name: name} -} - -// _escFSByte returns the named file from the embedded assets. If useLocal is -// true, the filesystem's contents are instead used. -func _escFSByte(useLocal bool, name string) ([]byte, error) { - if useLocal { - f, err := _escLocal.Open(name) - if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(f) - _ = f.Close() - return b, err - } - f, err := _escStatic.prepare(name) - if err != nil { - return nil, err - } - return f.data, nil -} - -// _escFSMustByte is the same as _escFSByte, but panics if name is not present. -func _escFSMustByte(useLocal bool, name string) []byte { - b, err := _escFSByte(useLocal, name) - if err != nil { - panic(err) - } - return b -} - -// _escFSString is the string version of _escFSByte. -func _escFSString(useLocal bool, name string) (string, error) { - b, err := _escFSByte(useLocal, name) - return string(b), err -} - -// _escFSMustString is the string version of _escFSMustByte. 
-func _escFSMustString(useLocal bool, name string) string { - return string(_escFSMustByte(useLocal, name)) -} - -var _escData = map[string]*_escFile{ - - "/config-schema.json": { - local: "config-schema.json", - size: 2771, - modtime: 1515512099, - compressed: ` -H4sIAAAAAAAC/+RWQY/TPBC951dE2T22m+/wnXot3JCKVAGHFarcZNLOEnvMeIKIUP87itNCkjpp6apc -OEUaz7z35nns+EcUx0kOLmO0gmSSRZysLJglGVFogOMlmQJ38dpChgVmymfNmrJHl+1Bq6ZkL2IXafri -yMzb6BPxLs1ZFTL/7/+0jT20dZifStwiTcmCyU5szpe12SlqtYM08/xtpdQWmlravkAmbcwyWWBBcMki -btqJ4yRjUAL5r0Cn1AmjaeF8vCDWSpqVXAnMBTUkfu3QpiSqkj3xBFQ/m7M9CmRSMVxbQ+7azKMXgeyO -Iz4ecMXHPzjgXmSEscPqc95+t+Qgf08sblj/yFB4A6FwT80IPKQ5FGiwGRWXamXXHnnVagzjm29jshSz -qpNZdwkF9FDGRCNxfBghFa4toZEhNxlYNT099wj6dJMSJ2RekNqXO5A8qcJUZdlH6uJ8Dlqw1Pk/2/tH -KisN7sb+b536e3f1ifgLmt0bvOmcv1NbKO9tyTqw8fe0ZC1k17gzqrzakqj7PV2/TCSFe831m2NRbDB3 -f/+uO+ZPdd+jBVPpsx1PSlUDuyTseDRgTRi+Vsj+P/wc8GCoLuoinjzfoxPiOmR636yAUWPbM75BwbfD -Zbem3hGB8T5/U1ze1FlA42ZbvwKDtIazP98fAIC2Um/8RIyDbIlKUGZkPvunLDoynM9N/1n1+9nUP5dR -MzuH6GcAAAD//0pj2wvTCgAA -`, - }, - - "/content-descriptor.json": { - local: "content-descriptor.json", - size: 1085, - modtime: 1515512099, - compressed: ` -H4sIAAAAAAAC/5yTwW7UMBCG73mKUVqpl27NoeIQVb3AnQPcEAevPY6nbGwznlW1oL47mniXJoAo3Vsy -+r+Zz8n4RwfQe6yOqQjl1A/QfyiY3uUklhIy6BMmgffHUGb4WNBRIGdn4lpbXFYXcbKKR5EyGPNQc9q0 -6k3m0Xi2QTZvbk2rXTSO/AmpgzG5YHKnyXXGWtr4X9MbJ4eCSubtAzpptcK5IAth7QfQgwH0E3qyn1q4 -lf48r0SEOadNIQfQAmNAxuTQw2LGjF8yBuU8hrp5FrvRE18Yj4ESae9qnqfP7FNr0Vf6/pKPRoASbA+C -9ZVOfxGhJG9v1xKeRqzygobjQ5E8si2RHLiI7mvdT9DYk1ZzuVZdfS1WBDnB1Z3djZlJ4nQ/3OmP9ejv -r875jkfXlf+ed/Uf9hZ21BQ1CIHzBI+RXASJVI/OMNkDbBF8fky7bD36c+xmk5WbTSnLfDtWiv+77DTZ -ERcrb5b9zhBc4s2zO7r2jN/2xKhin3+/McttXS9NB/Cle+p+BgAA///HjexwPQQAAA== -`, - }, - - "/defs-descriptor.json": { - local: "defs-descriptor.json", - size: 922, - modtime: 1515512099, - compressed: ` -H4sIAAAAAAAC/6STX2/TMBTF3/spLl7FgDZN4QFp0Ria2DsP42lTV93ZN/Ed8R/ZrqYy9bsjJ1naFYFA 
-PCSyj67Pub8b52kCIBRFGdgndlZUIK6oZst5F8FjSCw3LQZIDr56sl+cTciWAlwNx1yAa0+Sa5bYecx7 -09FFVJBzAIQhxfht62mUAASrnKpT8rEqS+fJyueMuHChKaPUZLBkgw2Vakwt927zZ6/Ue4uYAttmr3tM -iUKHd3d7Wdxg8WNZnK32y1cn09fF3XoxWz0t5+8/fNyVf1c2FV3Erk8SihuK6ZDuaLhJE8iw9ck1Ab1m -CVKT/B43Bvqz4GrIRe7+gWSaA9tuOwDA6Tm2jQuctLmozvOoFKmL03+cwMA1e/O5up0t1sVqVN6+q/L6 -srhZFmef1sVqdkS4CW38Ax9Cyz1ELoQ6OAOPmqWGpDkOVGBwC/cEyj3a1qEi9Wv/GAJu9zInMoe5vycF -ELULBvNXEJvAYtB3LzDQWpfw5fX8n7t46Dc2PQ1UZz9FdVw8RGdPyoPfojTor7ve+/cw50l+dpOfAQAA -//8aH/C2mgMAAA== -`, - }, - - "/defs.json": { - local: "defs.json", - size: 1670, - modtime: 1515512099, - compressed: ` -H4sIAAAAAAAC/7STza6bMBCF9zzFyO2S9oJtbGDb7hMpy6oLSiaJq2AjY6RWEe9e8RNChFuJKneRgGc8 -3zmeMbcAgByxKa2qnTKa5EC+4klp1a8aaBs8grtY054vpnXgLgi7GvUXo12hNFo41FiqkyqLoTwceTOA -5NBLABClXTqvAIj7XWOvprTDM9qhckhUSquqrUgOn2KaPsLFrykcUzkEu3Amx2IrmlEpfPA+vsIzuhVP -Yy55ygT3aczJlZDgW4UyShmTNGIiTbiUIooij6Jn15N0+x/T8enQJFlxN8/GBxZJwtbozXPxoTnNeCYk -zdb8zePw8eOUcyE5jySTUZYk1Nf8WOxNz7VLQaNxdyI5fJsCMKeG9EeLfZZ8eFt8cG9Ty+eNXeivvp9G -t9frYvf09t3Ti1c6FPy1DhtnlT5vd3jXGOtf66kq6sOAHf99V8n8+Imle9ykunAOrd5bU6N1CptFEQD5 -fIvD7in0ryMEy+fK1G6UfmdTE+tvpoL+1wV/AgAA//96IpqyhgYAAA== -`, - }, - - "/image-index-schema.json": { - local: "image-index-schema.json", - size: 2993, - modtime: 1515512099, - compressed: ` -H4sIAAAAAAAC/6yWz0/jOhDH7/0rRgGJC5CnJ/QOFeLy9sJpD4v2suJg7EkybGNnx1Ogu+r/vrJN2qRJ -C4Te2rHnO5/vxL/+zAAyg14zNULOZnPIvjZo/3dWFFlkuK1ViXBrDb7AtwY1FaRVnHoeck+9rrBWIa8S -aeZ5/uidvUjRS8dlblgVcvHPVZ5iJymPTJvi53nuGrS6LeljWpqdUyifUyifEmXVYEh1D4+oJcUadg2y -EPpsDsESQJbyvyP7ZCuFh27vKvJQEC4M+GQPPUiFECtDrAxJDJ6SGigPygJZwRI5IkTlCZ7yPuZGqnU5 -qFGTpXpZZ3P4dxtTL20shtZpJKuVpQK9+K79Vlkxq1WHXbDuzvuwnbbYl9f2ui30+Fd7HWH8tSTGUOvH -Jhrg0ZC6C2nn3bCn3zsRQyV6yTah+474yMIYyPcHhgskrIU4O3gAV8TFwVggo9VoYGApipwyFiHbYOEv -zKYnl2F3nOQGC7IUKvh8S9JRWA9Nv4czTASy8LAS9JNYRwDJyn9X++Fe+/8ePM2rRlzJqqlIg65Q//TL -GpJCi5sYz4ON8LdRIsgWzq7VonRMUtU38+uwFg2am7Ppfd9dN7u+lrzwb7pSsKCEHqZDwa6G54p0BRLO 
-leQFarWCBwTjnu3CKYNmOnWk2svcLJQUjush98c280Znh3PvNj60leOYYl2RoJYl404eQOZ6nnp7+PA+ -HmoPxye7zw9Cd9rhhcmW2c6E9ZjNY+I5fxyoy6fBLXkMuI3scSALVOE7HLuFW90DmP3Lslt2cG2+2yTA -+k3bT4pJWRm3/EYPZ/v+9Y8MZa2T+KDznz01tgdX3lWdfNZ1RWZjXtpf696zZ9zRpNfZmI3PGAigEXN4 -VmZjL8HOE24GcD9bz/4GAAD//yCnv52xCwAA -`, - }, - - "/image-layout-schema.json": { - local: "image-layout-schema.json", - size: 439, - modtime: 1515512099, - compressed: ` -H4sIAAAAAAAC/2yPQUvEMBCF7/0VQ/Sg4DYVPOW6pwVhD4IX8VDTaTvLNonJVFik/12SaRXRU5g38+W9 -91kBqA6TjRSYvFMG1DGg23vHLTmMcJjaAeGxvfiZ4cmOOLXqLlPXSQYDamQORutT8m4nau3joLvY9rxr -HrRoV8JRtyHJaO0DOruZpYLJtaZsrM/FWEi+BMysfzuhXbUQfcDIhEkZyG2yQyYl8TPGJLVk97fth1yA -74FHhOP+8LvyDbmy8JZ2EgZ6OuNtsS8fbrESR3LDj45unpSBl3UGUPd1UzdqnV/Lu1QAS2kS8X2miN03 -8l+PKnNL9RUAAP//k31n5bcBAAA= -`, - }, - - "/image-manifest-schema.json": { - local: "image-manifest-schema.json", - size: 921, - modtime: 1515512099, - compressed: ` -H4sIAAAAAAAC/5ySMW8iMRCF+/0VI0MJ+O501bZXUZxSJEoTpXB2x7uDWNsZmygo4r9HtnHAkCKifTvv -zTdv/dEAiB59x+QCWSNaEHcOzT9rgiKDDOtJDQj/lSGNPsC9w440dSpNL6J97rsRJxWtYwiulXLjrVlm -dWV5kD0rHZa//sqszbKP+mLxrZTWoenKVp9seVpSJJDTkSB7w95hdNuXDXZHzbF1yIHQixbiYQAiRzwi -+3xclq9vfhjJgybc9uDzheghjAhpOZTlkPPgLQeC8qAMkAk4ICeKFH7bZbKG/Uort16tmcjQtJtEC39O -mnovWpIO+YvorNE0nDcwZ9QxNqKhCcvSiOVV/H+ism/VHtmf2wuVYlb7imkdcIqjv099HJVi/ul2gENF -oYyxIb28CuXGus/TFpet9Kj9JdRM9qjJULJU9qawJlLB+Lojxoj19N07rP9JXXED8Nwcms8AAAD//7u3 -Dj+ZAwAA -`, - }, - - "/": { - isDir: true, - local: "", - }, -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/gen.go b/vendor/github.com/opencontainers/image-spec/schema/gen.go deleted file mode 100644 index ae78604fd8..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/gen.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -// Generates an embbedded http.FileSystem for all schema files -// using esc (https://github.com/mjibson/esc). - -// This should generally be invoked with `make schema-fs` -//go:generate esc -private -pkg=schema -include=.*\.json$ . diff --git a/vendor/github.com/opencontainers/image-spec/schema/image-index-schema.json b/vendor/github.com/opencontainers/image-spec/schema/image-index-schema.json deleted file mode 100644 index 8a962aab24..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/image-index-schema.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": "OpenContainer Image Index Specification", - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "https://opencontainers.org/schema/image/index", - "type": "object", - "properties": { - "schemaVersion": { - "description": "This field specifies the image index schema version as an integer", - "id": "https://opencontainers.org/schema/image/index/schemaVersion", - "type": "integer", - "minimum": 2, - "maximum": 2 - }, - "manifests": { - "type": "array", - "items": { - "id": "https://opencontainers.org/schema/image/manifestDescriptor", - "type": "object", - "required": [ - "mediaType", - "size", - "digest" - ], - "properties": { - "mediaType": { - "description": "the mediatype of the referenced object", - "$ref": "defs-descriptor.json#/definitions/mediaType" - }, - "size": { - "description": "the size in bytes of the referenced object", - "$ref": "defs.json#/definitions/int64" - }, - "digest": { - "description": "the cryptographic checksum digest of 
the object, in the pattern ':'", - "$ref": "defs-descriptor.json#/definitions/digest" - }, - "urls": { - "description": "a list of urls from which this object may be downloaded", - "$ref": "defs-descriptor.json#/definitions/urls" - }, - "platform": { - "id": "https://opencontainers.org/schema/image/platform", - "type": "object", - "required": [ - "architecture", - "os" - ], - "properties": { - "architecture": { - "id": "https://opencontainers.org/schema/image/platform/architecture", - "type": "string" - }, - "os": { - "id": "https://opencontainers.org/schema/image/platform/os", - "type": "string" - }, - "os.version": { - "id": "https://opencontainers.org/schema/image/platform/os.version", - "type": "string" - }, - "os.features": { - "id": "https://opencontainers.org/schema/image/platform/os.features", - "type": "array", - "items": { - "type": "string" - } - }, - "variant": { - "type": "string" - } - } - }, - "annotations": { - "id": "https://opencontainers.org/schema/image/descriptor/annotations", - "$ref": "defs-descriptor.json#/definitions/annotations" - } - } - } - }, - "annotations": { - "id": "https://opencontainers.org/schema/image/index/annotations", - "$ref": "defs-descriptor.json#/definitions/annotations" - } - }, - "required": [ - "schemaVersion", - "manifests" - ] -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/image-layout-schema.json b/vendor/github.com/opencontainers/image-spec/schema/image-layout-schema.json deleted file mode 100644 index 874d2174c7..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/image-layout-schema.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "description": "OpenContainer Image Layout Schema", - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "https://opencontainers.org/schema/image/layout", - "type": "object", - "properties": { - "imageLayoutVersion": { - "description": "version of the OCI Image Layout (in the oci-layout file)", - "type": "string", - "enum": [ - "1.0.0" - ] - } - }, 
- "required": [ - "imageLayoutVersion" - ] -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/image-manifest-schema.json b/vendor/github.com/opencontainers/image-spec/schema/image-manifest-schema.json deleted file mode 100644 index ec00748e19..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/image-manifest-schema.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "description": "OpenContainer Image Manifest Specification", - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "https://opencontainers.org/schema/image/manifest", - "type": "object", - "properties": { - "schemaVersion": { - "description": "This field specifies the image manifest schema version as an integer", - "id": "https://opencontainers.org/schema/image/manifest/schemaVersion", - "type": "integer", - "minimum": 2, - "maximum": 2 - }, - "config": { - "$ref": "content-descriptor.json" - }, - "layers": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "content-descriptor.json" - } - }, - "annotations": { - "id": "https://opencontainers.org/schema/image/manifest/annotations", - "$ref": "defs-descriptor.json#/definitions/annotations" - } - }, - "required": [ - "schemaVersion", - "config", - "layers" - ] -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/loader.go b/vendor/github.com/opencontainers/image-spec/schema/loader.go deleted file mode 100644 index c6bde00482..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/loader.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - - "github.com/xeipuuv/gojsonreference" - "github.com/xeipuuv/gojsonschema" -) - -// fsLoaderFactory implements gojsonschema.JSONLoaderFactory by reading files under the specified namespaces from the root of fs. -type fsLoaderFactory struct { - namespaces []string - fs http.FileSystem -} - -// newFSLoaderFactory returns a fsLoaderFactory reading files under the specified namespaces from the root of fs. -func newFSLoaderFactory(namespaces []string, fs http.FileSystem) *fsLoaderFactory { - return &fsLoaderFactory{ - namespaces: namespaces, - fs: fs, - } -} - -func (factory *fsLoaderFactory) New(source string) gojsonschema.JSONLoader { - return &fsLoader{ - factory: factory, - source: source, - } -} - -// refContents returns the contents of ref, if available in fsLoaderFactory. 
-func (factory *fsLoaderFactory) refContents(ref gojsonreference.JsonReference) ([]byte, error) { - refStr := ref.String() - path := "" - for _, ns := range factory.namespaces { - if strings.HasPrefix(refStr, ns) { - path = "/" + strings.TrimPrefix(refStr, ns) - break - } - } - if path == "" { - return nil, fmt.Errorf("Schema reference %#v unexpectedly not available in fsLoaderFactory with namespaces %#v", path, factory.namespaces) - } - - f, err := factory.fs.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - return ioutil.ReadAll(f) -} - -// fsLoader implements gojsonschema.JSONLoader by reading the document named by source from a fsLoaderFactory. -type fsLoader struct { - factory *fsLoaderFactory - source string -} - -// JsonSource implements gojsonschema.JSONLoader.JsonSource. The "Json" capitalization needs to be maintained to conform to the interface. -func (l *fsLoader) JsonSource() interface{} { // nolint: golint - return l.source -} - -func (l *fsLoader) LoadJSON() (interface{}, error) { - // Based on gojsonschema.jsonReferenceLoader.LoadJSON. - reference, err := gojsonreference.NewJsonReference(l.source) - if err != nil { - return nil, err - } - - refToURL := reference - refToURL.GetUrl().Fragment = "" - - body, err := l.factory.refContents(refToURL) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(body)) -} - -// decodeJSONUsingNumber returns JSON parsed from an io.Reader -func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { - // Copied from gojsonschema. - var document interface{} - - decoder := json.NewDecoder(r) - decoder.UseNumber() - - err := decoder.Decode(&document) - if err != nil { - return nil, err - } - - return document, nil -} - -// JsonReference implements gojsonschema.JSONLoader.JsonReference. The "Json" capitalization needs to be maintained to conform to the interface. 
-func (l *fsLoader) JsonReference() (gojsonreference.JsonReference, error) { // nolint: golint - return gojsonreference.NewJsonReference(l.JsonSource().(string)) -} - -func (l *fsLoader) LoaderFactory() gojsonschema.JSONLoaderFactory { - return l.factory -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/schema.go b/vendor/github.com/opencontainers/image-spec/schema/schema.go deleted file mode 100644 index 2a560552a8..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/schema.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "net/http" - - "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Media types for the OCI image formats -const ( - ValidatorMediaTypeDescriptor Validator = v1.MediaTypeDescriptor - ValidatorMediaTypeLayoutHeader Validator = v1.MediaTypeLayoutHeader - ValidatorMediaTypeManifest Validator = v1.MediaTypeImageManifest - ValidatorMediaTypeImageIndex Validator = v1.MediaTypeImageIndex - ValidatorMediaTypeImageConfig Validator = v1.MediaTypeImageConfig - ValidatorMediaTypeImageLayer unimplemented = v1.MediaTypeImageLayer -) - -var ( - // fs stores the embedded http.FileSystem - // having the OCI JSON schema files in root "/". - fs = _escFS(false) - - // schemaNamespaces is a set of URI prefixes which are treated as containing the schema files of fs. 
- // This is necessary because *.json schema files in this directory use "id" and "$ref" attributes which evaluate to such URIs, e.g. - // ./image-manifest-schema.json URI contains - // "id": "https://opencontainers.org/schema/image/manifest", - // and - // "$ref": "content-descriptor.json" - // which evaluates as a link to https://opencontainers.org/schema/image/content-descriptor.json . - // - // To support such links without accessing the network (and trying to load content which is not hosted at these URIs), - // fsLoaderFactory accepts any URI starting with one of the schemaNamespaces below, - // and uses _escFS to load them from the root of its in-memory filesystem tree. - // - // (Note that this must contain subdirectories before its parent directories for fsLoaderFactory.refContents to work.) - schemaNamespaces = []string{ - "https://opencontainers.org/schema/image/descriptor/", - "https://opencontainers.org/schema/image/index/", - "https://opencontainers.org/schema/image/manifest/", - "https://opencontainers.org/schema/image/", - "https://opencontainers.org/schema/", - } - - // specs maps OCI schema media types to schema URIs. - // These URIs are expected to be used only by fsLoaderFactory (which trims schemaNamespaces defined above) - // and should never cause a network access. - specs = map[Validator]string{ - ValidatorMediaTypeDescriptor: "https://opencontainers.org/schema/content-descriptor.json", - ValidatorMediaTypeLayoutHeader: "https://opencontainers.org/schema/image/image-layout-schema.json", - ValidatorMediaTypeManifest: "https://opencontainers.org/schema/image/image-manifest-schema.json", - ValidatorMediaTypeImageIndex: "https://opencontainers.org/schema/image/image-index-schema.json", - ValidatorMediaTypeImageConfig: "https://opencontainers.org/schema/image/config-schema.json", - } -) - -// FileSystem returns an in-memory filesystem including the schema files. -// The schema files are located at the root directory. 
-func FileSystem() http.FileSystem { - return fs -} diff --git a/vendor/github.com/opencontainers/image-spec/schema/validator.go b/vendor/github.com/opencontainers/image-spec/schema/validator.go deleted file mode 100644 index 029217c3b6..0000000000 --- a/vendor/github.com/opencontainers/image-spec/schema/validator.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "regexp" - - digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/xeipuuv/gojsonschema" -) - -// Validator wraps a media type string identifier -// and implements validation against a JSON schema. -type Validator string - -type validateFunc func(r io.Reader) error - -var mapValidate = map[Validator]validateFunc{ - ValidatorMediaTypeImageConfig: validateConfig, - ValidatorMediaTypeDescriptor: validateDescriptor, - ValidatorMediaTypeImageIndex: validateIndex, - ValidatorMediaTypeManifest: validateManifest, -} - -// ValidationError contains all the errors that happened during validation. -type ValidationError struct { - Errs []error -} - -func (e ValidationError) Error() string { - return fmt.Sprintf("%v", e.Errs) -} - -// Validate validates the given reader against the schema of the wrapped media type. 
-func (v Validator) Validate(src io.Reader) error { - buf, err := ioutil.ReadAll(src) - if err != nil { - return errors.Wrap(err, "unable to read the document file") - } - - if f, ok := mapValidate[v]; ok { - if f == nil { - return fmt.Errorf("internal error: mapValidate[%q] is nil", v) - } - err = f(bytes.NewReader(buf)) - if err != nil { - return err - } - } - - sl := newFSLoaderFactory(schemaNamespaces, fs).New(specs[v]) - ml := gojsonschema.NewStringLoader(string(buf)) - - result, err := gojsonschema.Validate(sl, ml) - if err != nil { - return errors.Wrapf( - WrapSyntaxError(bytes.NewReader(buf), err), - "schema %s: unable to validate", v) - } - - if result.Valid() { - return nil - } - - errs := make([]error, 0, len(result.Errors())) - for _, desc := range result.Errors() { - errs = append(errs, fmt.Errorf("%s", desc)) - } - - return ValidationError{ - Errs: errs, - } -} - -type unimplemented string - -func (v unimplemented) Validate(src io.Reader) error { - return fmt.Errorf("%s: unimplemented", v) -} - -func validateManifest(r io.Reader) error { - header := v1.Manifest{} - - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrapf(err, "error reading the io stream") - } - - err = json.Unmarshal(buf, &header) - if err != nil { - return errors.Wrap(err, "manifest format mismatch") - } - - if header.Config.MediaType != string(v1.MediaTypeImageConfig) { - fmt.Printf("warning: config %s has an unknown media type: %s\n", header.Config.Digest, header.Config.MediaType) - } - - for _, layer := range header.Layers { - if layer.MediaType != string(v1.MediaTypeImageLayer) && - layer.MediaType != string(v1.MediaTypeImageLayerGzip) && - layer.MediaType != string(v1.MediaTypeImageLayerNonDistributable) && - layer.MediaType != string(v1.MediaTypeImageLayerNonDistributableGzip) { - fmt.Printf("warning: layer %s has an unknown media type: %s\n", layer.Digest, layer.MediaType) - } - } - return nil -} - -func validateDescriptor(r io.Reader) error { - header := 
v1.Descriptor{} - - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrapf(err, "error reading the io stream") - } - - err = json.Unmarshal(buf, &header) - if err != nil { - return errors.Wrap(err, "descriptor format mismatch") - } - - err = header.Digest.Validate() - if err == digest.ErrDigestUnsupported { - // we ignore unsupported algorithms - fmt.Printf("warning: unsupported digest: %q: %v\n", header.Digest, err) - return nil - } - return err -} - -func validateIndex(r io.Reader) error { - header := v1.Index{} - - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrapf(err, "error reading the io stream") - } - - err = json.Unmarshal(buf, &header) - if err != nil { - return errors.Wrap(err, "index format mismatch") - } - - for _, manifest := range header.Manifests { - if manifest.MediaType != string(v1.MediaTypeImageManifest) { - fmt.Printf("warning: manifest %s has an unknown media type: %s\n", manifest.Digest, manifest.MediaType) - } - if manifest.Platform != nil { - checkPlatform(manifest.Platform.OS, manifest.Platform.Architecture) - } - - } - - return nil -} - -func validateConfig(r io.Reader) error { - header := v1.Image{} - - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrapf(err, "error reading the io stream") - } - - err = json.Unmarshal(buf, &header) - if err != nil { - return errors.Wrap(err, "config format mismatch") - } - - checkPlatform(header.OS, header.Architecture) - - envRegexp := regexp.MustCompile(`^[^=]+=.*$`) - for _, e := range header.Config.Env { - if !envRegexp.MatchString(e) { - return errors.Errorf("unexpected env: %q", e) - } - } - - return nil -} - -func checkPlatform(OS string, Architecture string) { - validCombins := map[string][]string{ - "android": {"arm"}, - "darwin": {"386", "amd64", "arm", "arm64"}, - "dragonfly": {"amd64"}, - "freebsd": {"386", "amd64", "arm"}, - "linux": {"386", "amd64", "arm", "arm64", "ppc64", "ppc64le", "mips64", "mips64le", "s390x"}, - "netbsd": {"386", 
"amd64", "arm"}, - "openbsd": {"386", "amd64", "arm"}, - "plan9": {"386", "amd64"}, - "solaris": {"amd64"}, - "windows": {"386", "amd64"}} - for os, archs := range validCombins { - if os == OS { - for _, arch := range archs { - if arch == Architecture { - return - } - } - fmt.Printf("warning: combination of %q and %q is invalid.\n", OS, Architecture) - } - } - fmt.Printf("warning: operating system %q of the bundle is not supported yet.\n", OS) -} diff --git a/vendor/github.com/opencontainers/image-tools/image/autodetect.go b/vendor/github.com/opencontainers/image-tools/image/autodetect.go deleted file mode 100644 index 54abbcc6b8..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/autodetect.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package image - -import ( - "io" - "io/ioutil" - "net/http" - "os" - - "github.com/pkg/errors" -) - -// supported autodetection types -const ( - TypeImageLayout = "imageLayout" - TypeImage = "image" - TypeImageZip = "imageZip" - TypeManifest = "manifest" - TypeImageIndex = "imageIndex" - TypeConfig = "config" -) - -// Autodetect detects the validation type for the given path -// or an error if the validation type could not be resolved. 
-func Autodetect(path string) (string, error) { - fi, err := os.Stat(path) - if err != nil { - return "", errors.Wrapf(err, "unable to access path") // err from os.Stat includes path name - } - - if fi.IsDir() { - return TypeImageLayout, nil - } - - f, err := os.Open(path) - if err != nil { - return "", errors.Wrap(err, "unable to open file") // os.Open includes the filename - } - defer f.Close() - - buf, err := ioutil.ReadAll(io.LimitReader(f, 512)) // read some initial bytes to detect content - if err != nil { - return "", errors.Wrap(err, "unable to read") - } - - mimeType := http.DetectContentType(buf) - - switch mimeType { - case "application/x-gzip", "application/x-rar-compressed", "application/octet-stream": - return TypeImage, nil - case "application/zip": - return TypeImageZip, nil - } - - return "", errors.New("unknown file type") -} diff --git a/vendor/github.com/opencontainers/image-tools/image/config.go b/vendor/github.com/opencontainers/image-tools/image/config.go deleted file mode 100644 index 45711990e4..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/config.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package image - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - - "github.com/opencontainers/image-spec/schema" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -func findConfig(w walker, d *v1.Descriptor) (*v1.Image, error) { - var c v1.Image - cpath := filepath.Join("blobs", string(d.Digest.Algorithm()), d.Digest.Hex()) - - switch err := w.find(cpath, func(path string, r io.Reader) error { - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrapf(err, "%s: error reading config", path) - } - - if err := schema.ValidatorMediaTypeImageConfig.Validate(bytes.NewReader(buf)); err != nil { - return errors.Wrapf(err, "%s: config validation failed", path) - } - - if err := json.Unmarshal(buf, &c); err != nil { - return err - } - - return errEOW - }); err { - case nil: - return nil, fmt.Errorf("%s: config not found", cpath) - case errEOW: - return &c, nil - default: - return nil, err - } -} - -func runtimeSpec(c *v1.Image, rootfs string) (*specs.Spec, error) { - if c.OS != "linux" { - return nil, fmt.Errorf("%s: unsupported OS", c.OS) - } - - var s specs.Spec - s.Version = specs.Version - // we should at least apply the default spec, otherwise this is totally useless - s.Root = &specs.Root{} - s.Root.Path = rootfs - - s.Process = &specs.Process{} - s.Process.Terminal = true - s.Process.Cwd = "/" - if c.Config.WorkingDir != "" { - s.Process.Cwd = c.Config.WorkingDir - } - s.Process.Env = append(s.Process.Env, c.Config.Env...) - s.Process.Args = append(s.Process.Args, c.Config.Entrypoint...) - s.Process.Args = append(s.Process.Args, c.Config.Cmd...) 
- - if len(s.Process.Args) == 0 { - s.Process.Args = append(s.Process.Args, "sh") - } - - if uid, err := strconv.Atoi(c.Config.User); err == nil { - s.Process.User.UID = uint32(uid) - } else if ug := strings.Split(c.Config.User, ":"); len(ug) == 2 { - uid, err := strconv.Atoi(ug[0]) - if err != nil { - return nil, errors.New("config.User: unsupported uid format") - } - - gid, err := strconv.Atoi(ug[1]) - if err != nil { - return nil, errors.New("config.User: unsupported gid format") - } - - s.Process.User.UID = uint32(uid) - s.Process.User.GID = uint32(gid) - } else if c.Config.User != "" { - return nil, errors.New("config.User: unsupported format") - } - - s.Linux = &specs.Linux{} - - for vol := range c.Config.Volumes { - s.Mounts = append( - s.Mounts, - specs.Mount{ - Destination: vol, - Type: "bind", - Options: []string{"rbind"}, - }, - ) - } - - return &s, nil -} diff --git a/vendor/github.com/opencontainers/image-tools/image/descriptor.go b/vendor/github.com/opencontainers/image-tools/image/descriptor.go deleted file mode 100644 index 5bf19acf69..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/descriptor.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package image - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -const indexPath = "index.json" - -func listReferences(w walker) ([]v1.Descriptor, error) { - var descs []v1.Descriptor - var index v1.Index - - if err := w.walk(func(path string, info os.FileInfo, r io.Reader) error { - if info.IsDir() || filepath.Clean(path) != indexPath { - return nil - } - - if err := json.NewDecoder(r).Decode(&index); err != nil { - return err - } - descs = index.Manifests - - return nil - }); err != nil { - return nil, err - } - - return descs, nil -} - -func findDescriptor(w walker, names []string) ([]v1.Descriptor, error) { - var descs []v1.Descriptor - var index v1.Index - dpath := "index.json" - - if err := w.find(dpath, func(path string, r io.Reader) error { - if err := json.NewDecoder(r).Decode(&index); err != nil { - return err - } - - descs = index.Manifests - for _, name := range names { - argsParts := strings.Split(name, "=") - if len(argsParts) != 2 { - return fmt.Errorf("each ref must contain two parts") - } - - switch argsParts[0] { - case "name": - for i := 0; i < len(descs); i++ { - if descs[i].Annotations[v1.AnnotationRefName] != argsParts[1] { - descs = append(descs[:i], descs[i+1:]...) - } - } - case "platform.os": - for i := 0; i < len(descs); i++ { - if descs[i].Platform != nil && index.Manifests[i].Platform.OS != argsParts[1] { - descs = append(descs[:i], descs[i+1:]...) - } - } - case "digest": - for i := 0; i < len(descs); i++ { - if string(descs[i].Digest) != argsParts[1] { - descs = append(descs[:i], descs[i+1:]...) 
- } - } - default: - return fmt.Errorf("criteria %q unimplemented", argsParts[0]) - } - } - - return nil - }); err != nil { - return nil, err - } - - if len(descs) == 0 { - return nil, fmt.Errorf("index.json: descriptor retrieved by refs %v is not match", names) - } else if len(descs) > 1 { - return nil, fmt.Errorf("index.json: descriptor retrieved by refs %v is not unique", names) - } - - return descs, nil -} - -func validateDescriptor(d *v1.Descriptor, w walker, mts []string) error { - var found bool - for _, mt := range mts { - if d.MediaType == mt { - found = true - break - } - } - if !found { - return fmt.Errorf("invalid descriptor MediaType %q", d.MediaType) - } - - if err := d.Digest.Validate(); err != nil { - return err - } - - // Copy the contents of the layer in to the verifier - verifier := d.Digest.Verifier() - numBytes, err := w.get(*d, verifier) - if err != nil { - return err - } - - if err != nil { - return errors.Wrap(err, "error generating hash") - } - - if numBytes != d.Size { - return errors.New("size mismatch") - } - - if !verifier.Verified() { - return errors.New("digest mismatch") - } - - return nil -} diff --git a/vendor/github.com/opencontainers/image-tools/image/doc.go b/vendor/github.com/opencontainers/image-tools/image/doc.go deleted file mode 100644 index de4163228d..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package image defines methods for validating, unpacking OCI images and creating OCI runtime bundle. -package image diff --git a/vendor/github.com/opencontainers/image-tools/image/image.go b/vendor/github.com/opencontainers/image-tools/image/image.go deleted file mode 100644 index ba4b860f1d..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/image.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package image - -import ( - "encoding/json" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// ValidateLayout walks through the given file tree and validates the manifest -// pointed to by the given refs or returns an error if the validation failed. -func ValidateLayout(src string, refs []string, out *log.Logger) error { - return validate(newPathWalker(src), refs, out) -} - -// ValidateZip walks through the given file tree and validates the manifest -// pointed to by the given refs or returns an error if the validation failed. 
-func ValidateZip(src string, refs []string, out *log.Logger) error { - return validate(newZipWalker(src), refs, out) -} - -// ValidateFile opens the tar file given by the filename, then calls ValidateReader -func ValidateFile(tarFile string, refs []string, out *log.Logger) error { - f, err := os.Open(tarFile) - if err != nil { - return errors.Wrap(err, "unable to open file") - } - defer f.Close() - - return Validate(f, refs, out) -} - -// Validate walks through a tar stream and validates the manifest. -// * Check that all refs point to extant blobs -// * Checks that all referred blobs are valid -// * Checks that mime-types are correct -// returns error on validation failure -func Validate(r io.ReadSeeker, refs []string, out *log.Logger) error { - return validate(newTarWalker(r), refs, out) -} - -var validRefMediaTypes = []string{ - v1.MediaTypeImageManifest, - v1.MediaTypeImageIndex, -} - -func validate(w walker, refs []string, out *log.Logger) error { - var descs []v1.Descriptor - var err error - - if err = layoutValidate(w); err != nil { - return err - } - - if len(refs) == 0 { - out.Print("No ref specified, verify all refs") - descs, err = listReferences(w) - if err != nil { - return err - } - if len(descs) == 0 { - // TODO(runcom): ugly, we'll need a better way and library - // to express log levels. 
- // see https://github.com/opencontainers/image-spec/issues/288 - out.Print("WARNING: no descriptors found") - return nil - } - } else { - descs, err = findDescriptor(w, refs) - if err != nil { - return err - } - } - - for _, desc := range descs { - d := &desc - if err = validateDescriptor(d, w, validRefMediaTypes); err != nil { - return err - } - - if d.MediaType == validRefMediaTypes[0] { - m, err := findManifest(w, d) - if err != nil { - return err - } - - if err := validateManifest(m, w); err != nil { - return err - } - } - - if d.MediaType == validRefMediaTypes[1] { - index, err := findIndex(w, d) - if err != nil { - return err - } - - if err := validateIndex(index, w); err != nil { - return err - } - - if len(index.Manifests) == 0 { - fmt.Println("warning: no manifests found") - return nil - } - - for _, manifest := range index.Manifests { - m, err := findManifest(w, &manifest) - if err != nil { - return err - } - - if err := validateManifest(m, w); err != nil { - return err - } - } - } - } - - if out != nil && len(refs) > 0 { - out.Printf("reference %v: OK", refs) - } - - return nil -} - -// UnpackLayout walks through the file tree given by src and, using the layers -// specified in the manifest pointed to by the given ref, unpacks all layers in -// the given destination directory or returns an error if the unpacking failed. -func UnpackLayout(src, dest, platform string, refs []string) error { - return unpack(newPathWalker(src), dest, platform, refs) -} - -// UnpackZip opens and walks through the zip file given by src and, using the layers -// specified in the manifest pointed to by the given ref, unpacks all layers in -// the given destination directory or returns an error if the unpacking failed. -func UnpackZip(src, dest, platform string, refs []string) error { - return unpack(newZipWalker(src), dest, platform, refs) -} - -// UnpackFile opens the file pointed by tarFileName and calls Unpack on it. 
-func UnpackFile(tarFileName, dest, platform string, refs []string) error { - f, err := os.Open(tarFileName) - if err != nil { - return errors.Wrap(err, "unable to open file") - } - defer f.Close() - - return Unpack(f, dest, platform, refs) -} - -// Unpack walks through the tar stream and, using the layers specified in -// the manifest pointed to by the given ref, unpacks all layers in the given -// destination directory or returns an error if the unpacking failed. -// The destination will be created if it does not exist. -func Unpack(r io.ReadSeeker, dest, platform string, refs []string) error { - return unpack(newTarWalker(r), dest, platform, refs) -} - -func unpack(w walker, dest, platform string, refs []string) error { - if err := layoutValidate(w); err != nil { - return err - } - - descs, err := findDescriptor(w, refs) - if err != nil { - return err - } - - ref := &descs[0] - if err = validateDescriptor(ref, w, validRefMediaTypes); err != nil { - return err - } - - if ref.MediaType == validRefMediaTypes[0] { - m, err := findManifest(w, ref) - if err != nil { - return err - } - - if err := validateManifest(m, w); err != nil { - return err - } - - return unpackManifest(m, w, dest) - } - - if ref.MediaType == validRefMediaTypes[1] { - index, err := findIndex(w, ref) - if err != nil { - return err - } - - if err = validateIndex(index, w); err != nil { - return err - } - - manifests, err := filterManifest(w, index.Manifests, platform) - if err != nil { - return err - } - - for _, m := range manifests { - return unpackManifest(m, w, dest) - } - } - - return nil -} - -// CreateRuntimeBundleLayout walks through the file tree given by src and -// creates an OCI runtime bundle in the given destination dest -// or returns an error if the unpacking failed. 
-func CreateRuntimeBundleLayout(src, dest, root, platform string, refs []string) error { - return createRuntimeBundle(newPathWalker(src), dest, root, platform, refs) -} - -// CreateRuntimeBundleZip opens and walks through the zip file given by src -// and creates an OCI runtime bundle in the given destination dest -// or returns an error if the unpacking failed. -func CreateRuntimeBundleZip(src, dest, root, platform string, refs []string) error { - return createRuntimeBundle(newZipWalker(src), dest, root, platform, refs) -} - -// CreateRuntimeBundleFile opens the file pointed by tarFile and calls -// CreateRuntimeBundle. -func CreateRuntimeBundleFile(tarFile, dest, root, platform string, refs []string) error { - f, err := os.Open(tarFile) - if err != nil { - return errors.Wrap(err, "unable to open file") - } - defer f.Close() - - return createRuntimeBundle(newTarWalker(f), dest, root, platform, refs) -} - -// CreateRuntimeBundle walks through the given tar stream and -// creates an OCI runtime bundle in the given destination dest -// or returns an error if the unpacking failed. 
-func CreateRuntimeBundle(r io.ReadSeeker, dest, root, platform string, refs []string) error { - return createRuntimeBundle(newTarWalker(r), dest, root, platform, refs) -} - -func createRuntimeBundle(w walker, dest, rootfs, platform string, refs []string) error { - if err := layoutValidate(w); err != nil { - return err - } - - descs, err := findDescriptor(w, refs) - if err != nil { - return err - } - - ref := &descs[0] - if err = validateDescriptor(ref, w, validRefMediaTypes); err != nil { - return err - } - - if ref.MediaType == validRefMediaTypes[0] { - m, err := findManifest(w, ref) - if err != nil { - return err - } - - if err := validateManifest(m, w); err != nil { - return err - } - - return createBundle(w, m, dest, rootfs) - } - - if ref.MediaType == validRefMediaTypes[1] { - index, err := findIndex(w, ref) - if err != nil { - return err - } - - if err = validateIndex(index, w); err != nil { - return err - } - - manifests, err := filterManifest(w, index.Manifests, platform) - if err != nil { - return err - } - - for _, m := range manifests { - return createBundle(w, m, dest, rootfs) - } - } - - return nil -} - -func createBundle(w walker, m *v1.Manifest, dest, rootfs string) (retErr error) { - c, err := findConfig(w, &m.Config) - if err != nil { - return err - } - - if _, err = os.Stat(dest); err != nil { - if os.IsNotExist(err) { - if err2 := os.MkdirAll(dest, 0755); err2 != nil { - return err2 - } - defer func() { - if retErr != nil { - if err3 := os.RemoveAll(dest); err3 != nil { - fmt.Printf("Failed to clean up %q: %s\n", dest, err3.Error()) - } - } - }() - } else { - return err - } - } - - if err = unpackManifest(m, w, filepath.Join(dest, rootfs)); err != nil { - return err - } - - spec, err := runtimeSpec(c, rootfs) - if err != nil { - return err - } - - f, err := os.Create(filepath.Join(dest, "config.json")) - if err != nil { - return err - } - defer f.Close() - - return json.NewEncoder(f).Encode(spec) -} - -// filertManifest returns a filtered list 
of manifests -func filterManifest(w walker, Manifests []v1.Descriptor, platform string) ([]*v1.Manifest, error) { - var manifests []*v1.Manifest - - argsParts := strings.Split(platform, ":") - if len(argsParts) != 2 { - return manifests, fmt.Errorf("platform must have os and arch when reftype is index") - } - - if len(Manifests) == 0 { - fmt.Println("warning: no manifests found") - return manifests, nil - } - - for _, manifest := range Manifests { - m, err := findManifest(w, &manifest) - if err != nil { - return manifests, err - } - - if err := validateManifest(m, w); err != nil { - return manifests, err - } - if strings.EqualFold(manifest.Platform.OS, argsParts[0]) && strings.EqualFold(manifest.Platform.Architecture, argsParts[1]) { - manifests = append(manifests, m) - } - } - - if len(manifests) == 0 { - return manifests, fmt.Errorf("There is no matching manifest") - } - - return manifests, nil -} diff --git a/vendor/github.com/opencontainers/image-tools/image/index.go b/vendor/github.com/opencontainers/image-tools/image/index.go deleted file mode 100644 index f0c8d61fc6..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/index.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package image - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/opencontainers/image-spec/schema" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func findIndex(w walker, d *v1.Descriptor) (*v1.Index, error) { - var index v1.Index - ipath := filepath.Join("blobs", string(d.Digest.Algorithm()), d.Digest.Hex()) - - switch err := w.walk(func(path string, info os.FileInfo, r io.Reader) error { - if info.IsDir() || filepath.Clean(path) != ipath { - return nil - } - - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrapf(err, "%s: error reading index", path) - } - - if err := schema.ValidatorMediaTypeImageIndex.Validate(bytes.NewReader(buf)); err != nil { - return errors.Wrapf(err, "%s: index validation failed", path) - } - - if err := json.Unmarshal(buf, &index); err != nil { - return err - } - - return errEOW - }); err { - case errEOW: - return &index, nil - case nil: - return nil, fmt.Errorf("index not found") - default: - return nil, err - } -} - -func validateIndex(index *v1.Index, w walker) error { - for _, manifest := range index.Manifests { - if err := validateDescriptor(&manifest, w, []string{v1.MediaTypeImageManifest}); err != nil { - return errors.Wrap(err, "manifest validation failed") - } - } - return nil -} diff --git a/vendor/github.com/opencontainers/image-tools/image/layout.go b/vendor/github.com/opencontainers/image-tools/image/layout.go deleted file mode 100644 index 352b1b95f7..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/layout.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package image - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/opencontainers/image-spec/schema" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func layoutValidate(w walker) error { - var blobsExist, indexExist, layoutExist bool - - if err := w.walk(func(path string, info os.FileInfo, r io.Reader) error { - if strings.EqualFold(filepath.Base(path), "blobs") { - blobsExist = true - if !info.IsDir() { - return fmt.Errorf("blobs is not a directory") - } - - return nil - } - - if strings.EqualFold(filepath.Base(path), "index.json") { - indexExist = true - if info.IsDir() { - return fmt.Errorf("index.json is a directory") - } - - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrap(err, "error reading index.json") - } - - if err := schema.ValidatorMediaTypeImageIndex.Validate(bytes.NewReader(buf)); err != nil { - return errors.Wrap(err, "index.json validation failed") - } - - return nil - } - - if strings.EqualFold(filepath.Base(path), "oci-layout") { - layoutExist = true - if info.IsDir() { - return fmt.Errorf("oci-layout is a directory") - } - - var imageLayout v1.ImageLayout - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrap(err, "error reading oci-layout") - } - - if err := schema.ValidatorMediaTypeLayoutHeader.Validate(bytes.NewReader(buf)); err != nil { - return errors.Wrap(err, "oci-layout validation failed") - } - - if err := json.Unmarshal(buf, &imageLayout); err != nil { - return errors.Wrap(err, 
"oci-layout format mismatch") - } - - return nil - } - - return nil - }); err != nil { - return err - } - - if !blobsExist { - return fmt.Errorf("image layout must contain blobs directory") - } - - if !indexExist { - return fmt.Errorf("image layout must contain index.json file") - } - - if !layoutExist { - return fmt.Errorf("image layout must contain oci-layout file") - } - - return nil -} diff --git a/vendor/github.com/opencontainers/image-tools/image/manifest.go b/vendor/github.com/opencontainers/image-tools/image/manifest.go deleted file mode 100644 index 9400121ae9..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/manifest.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Modifications by: Sylabs Inc. 
-// Add u+w if we aren't root to allow extraction -// - -package image - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/opencontainers/image-spec/schema" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func findManifest(w walker, d *v1.Descriptor) (*v1.Manifest, error) { - var m v1.Manifest - mpath := filepath.Join("blobs", string(d.Digest.Algorithm()), d.Digest.Hex()) - - switch err := w.find(mpath, func(path string, r io.Reader) error { - buf, err := ioutil.ReadAll(r) - if err != nil { - return errors.Wrapf(err, "%s: error reading manifest", path) - } - - if err := schema.ValidatorMediaTypeManifest.Validate(bytes.NewReader(buf)); err != nil { - return errors.Wrapf(err, "%s: manifest validation failed", path) - } - - if err := json.Unmarshal(buf, &m); err != nil { - return err - } - - return errEOW - }); err { - case nil: - return nil, fmt.Errorf("%s: manifest not found", mpath) - case errEOW: - return &m, nil - default: - return nil, err - } -} - -func validateManifest(m *v1.Manifest, w walker) error { - if err := validateDescriptor(&m.Config, w, []string{v1.MediaTypeImageConfig}); err != nil { - return errors.Wrap(err, "config validation failed") - } - - validLayerMediaTypes := []string{ - v1.MediaTypeImageLayer, - v1.MediaTypeImageLayerGzip, - v1.MediaTypeImageLayerNonDistributable, - v1.MediaTypeImageLayerNonDistributableGzip, - } - - for _, d := range m.Layers { - if err := validateDescriptor(&d, w, validLayerMediaTypes); err != nil { - return errors.Wrap(err, "layer validation failed") - } - } - - return nil -} - -func unpackManifest(m *v1.Manifest, w walker, dest string) (retErr error) { - // error out if the dest directory is not empty - s, err := ioutil.ReadDir(dest) - if err != nil && !os.IsNotExist(err) { // We'll create the dir later - return 
errors.Wrap(err, "unpack: unable to open dest") // err contains dest - } - if len(s) > 0 { - return fmt.Errorf("%s is not empty", dest) - } - defer func() { - // if we encounter error during unpacking - // clean up the partially-unpacked destination - if retErr != nil { - if err := os.RemoveAll(dest); err != nil { - fmt.Printf("Error: failed to remove partially-unpacked destination %v", err) - } - } - }() - for _, d := range m.Layers { - lpath := filepath.Join("blobs", string(d.Digest.Algorithm()), d.Digest.Hex()) - switch err := w.find(lpath, func(path string, r io.Reader) error { - if err := unpackLayer(d.MediaType, path, dest, r); err != nil { - return errors.Wrap(err, "unpack: error extracting layer") - } - - return errEOW - }); err { - case nil: - return fmt.Errorf("%s: layer not found", dest) - case errEOW: - default: - return err - } - } - return nil -} - -func getReader(path, mediaType, comp string, buf io.Reader) (io.Reader, error) { - switch comp { - case "gzip": - if !strings.HasSuffix(mediaType, "+gzip") { - logrus.Debugf("%q: %s media type with non-%s file", path, comp, comp) - } - - return gzip.NewReader(buf) - case "bzip2": - if !strings.HasSuffix(mediaType, "+bzip2") { - logrus.Debugf("%q: %s media type with non-%s file", path, comp, comp) - } - - return bzip2.NewReader(buf), nil - case "xz": - return nil, errors.New("xz layers are not supported") - default: - if strings.Contains(mediaType, "+") { - logrus.Debugf("%q: %s media type with non-%s file", path, comp, comp) - } - - return buf, nil - } -} - -// DetectCompression detects the compression algorithm of the source. 
-func DetectCompression(r *bufio.Reader) (string, error) { - source, err := r.Peek(10) - if err != nil { - return "", err - } - - for compression, m := range map[string][]byte{ - "bzip2": {0x42, 0x5A, 0x68}, - "gzip": {0x1F, 0x8B, 0x08}, - // FIXME needs decompression support - // "xz": {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Equal(m, source[:len(m)]) { - return compression, nil - } - } - return "plain", nil -} - -func unpackLayer(mediaType, path, dest string, r io.Reader) error { - entries := make(map[string]bool) - - buf := bufio.NewReader(r) - - comp, err := DetectCompression(buf) - if err != nil { - return err - } - - reader, err := getReader(path, mediaType, comp, buf) - if err != nil { - return err - } - - var dirs []*tar.Header - tr := tar.NewReader(reader) - -loop: - for { - hdr, err := tr.Next() - switch err { - case io.EOF: - break loop - case nil: - // success, continue below - default: - return errors.Wrapf(err, "error advancing tar stream") - } - - var whiteout bool - whiteout, err = unpackLayerEntry(dest, hdr, tr, &entries) - if err != nil { - return err - } - if whiteout { - continue loop - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - finfo := hdr.FileInfo() - // I believe the old version was using time.Now().UTC() to overcome an - // invalid error from chtimes.....but here we lose hdr.AccessTime like this... - if err := os.Chtimes(path, time.Now().UTC(), finfo.ModTime()); err != nil { - return errors.Wrap(err, "error changing time") - } - } - return nil -} - -// unpackLayerEntry unpacks a single entry from a layer. 
-func unpackLayerEntry(dest string, header *tar.Header, reader io.Reader, entries *map[string]bool) (whiteout bool, err error) { - header.Name = filepath.Clean(header.Name) - if !strings.HasSuffix(header.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(header.Name) - parentPath := filepath.Join(dest, parent) - if _, err2 := os.Lstat(parentPath); err2 != nil && os.IsNotExist(err2) { - if err3 := os.MkdirAll(parentPath, 0755); err3 != nil { - return false, err3 - } - } - } - path := filepath.Join(dest, header.Name) - if (*entries)[path] { - return false, fmt.Errorf("duplicate entry for %s", path) - } - (*entries)[path] = true - rel, err := filepath.Rel(dest, path) - if err != nil { - return false, err - } - info := header.FileInfo() - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return false, fmt.Errorf("%q is outside of %q", header.Name, dest) - } - - if strings.HasPrefix(info.Name(), ".wh.") { - path = strings.Replace(path, ".wh.", "", 1) - - if err = os.RemoveAll(path); err != nil { - return true, errors.Wrap(err, "unable to delete whiteout path") - } - - return true, nil - } - - if header.Typeflag != tar.TypeDir { - err = os.RemoveAll(path) - if err != nil && !os.IsNotExist(err) { - return false, err - } - } - - // SINGULARITY_PATCH - // Add u+w if we aren't root to allow extractions - extraPerms := os.FileMode(0000) - if os.Getuid() != 0 { - extraPerms = 0600 - } - - switch header.Typeflag { - case tar.TypeDir: - fi, err := os.Lstat(path) - if err != nil && !os.IsNotExist(err) { - return false, err - } - if os.IsNotExist(err) || !fi.IsDir() { - err = os.RemoveAll(path) - if err != nil && !os.IsNotExist(err) { - return false, err - } - err = os.MkdirAll(path, info.Mode()|extraPerms) - if err != nil { - return false, err - } - } - - case tar.TypeReg, tar.TypeRegA: - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, info.Mode()|extraPerms) - if err != nil { - return 
false, errors.Wrap(err, "unable to open file") - } - - if _, err := io.Copy(f, reader); err != nil { - f.Close() - return false, errors.Wrap(err, "unable to copy") - } - f.Close() - - case tar.TypeLink: - target := filepath.Join(dest, header.Linkname) - - if !strings.HasPrefix(target, dest) { - return false, fmt.Errorf("invalid hardlink %q -> %q", target, header.Linkname) - } - - if err := os.Link(target, path); err != nil { - return false, err - } - - case tar.TypeSymlink: - target := filepath.Join(filepath.Dir(path), header.Linkname) - - if !strings.HasPrefix(target, dest) { - return false, fmt.Errorf("invalid symlink %q -> %q", path, header.Linkname) - } - - if err := os.Symlink(header.Linkname, path); err != nil { - return false, err - } - case tar.TypeXGlobalHeader: - return false, nil - } - - return false, nil -} diff --git a/vendor/github.com/opencontainers/image-tools/image/project.go b/vendor/github.com/opencontainers/image-tools/image/project.go deleted file mode 100644 index b87186f795..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/project.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package image - -// SpecURL is the URL for the image-spec repository -var SpecURL = "https://github.com/opencontainers/image-spec" - -// IssuesURL is the URL for the issues of image-tools -var IssuesURL = "https://github.com/opencontainers/image-tools/issues" diff --git a/vendor/github.com/opencontainers/image-tools/image/walker.go b/vendor/github.com/opencontainers/image-tools/image/walker.go deleted file mode 100644 index 88b01df838..0000000000 --- a/vendor/github.com/opencontainers/image-tools/image/walker.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package image - -import ( - "archive/tar" - "archive/zip" - "fmt" - "io" - "os" - "path/filepath" - "sync" - - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var ( - errEOW = fmt.Errorf("end of walk") // error to signal stop walking -) - -// walkFunc is a function type that gets called for each file or directory visited by the Walker. -type walkFunc func(path string, _ os.FileInfo, _ io.Reader) error - -type findFunc func(path string, r io.Reader) error - -// walker is the interface that defines how to access a given archival format -type walker interface { - - // walk calls walkfunc for every entity in the archive - walk(walkFunc) error - - // get will copy an arbitrary blob, defined by desc, in to dst. returns - // the number of bytes copied on success. 
- get(desc v1.Descriptor, dst io.Writer) (int64, error) - - // find calls findFunc for handling content of path - find(path string, ff findFunc) error -} - -// tarWalker exposes access to image layouts in a tar file. -type tarWalker struct { - r io.ReadSeeker - - // Synchronize use of the reader - mut sync.Mutex -} - -// newTarWalker returns a Walker that walks through .tar files. -func newTarWalker(r io.ReadSeeker) walker { - return &tarWalker{r: r} -} - -func (w *tarWalker) walk(f walkFunc) error { - w.mut.Lock() - defer w.mut.Unlock() - - if _, err := w.r.Seek(0, io.SeekStart); err != nil { - return errors.Wrapf(err, "unable to reset") - } - - tr := tar.NewReader(w.r) - -loop: - for { - hdr, err := tr.Next() - switch err { - case io.EOF: - break loop - case nil: - // success, continue below - default: - return errors.Wrapf(err, "error advancing tar stream") - } - - info := hdr.FileInfo() - if err := f(hdr.Name, info, tr); err != nil { - return err - } - } - - return nil -} - -func (w *tarWalker) get(desc v1.Descriptor, dst io.Writer) (int64, error) { - var bytes int64 - done := false - - expectedPath := filepath.Join("blobs", string(desc.Digest.Algorithm()), desc.Digest.Hex()) - - f := func(path string, info os.FileInfo, rdr io.Reader) error { - var err error - if done { - return nil - } - - if filepath.Clean(path) == expectedPath && !info.IsDir() { - if bytes, err = io.Copy(dst, rdr); err != nil { - return errors.Wrapf(err, "get failed: failed to copy blob to destination") - } - done = true - } - return nil - } - - if err := w.walk(f); err != nil { - return 0, errors.Wrapf(err, "get failed: unable to walk") - } - if !done { - return 0, os.ErrNotExist - } - - return bytes, nil -} - -func (w *tarWalker) find(path string, ff findFunc) error { - done := false - - f := func(relpath string, info os.FileInfo, rdr io.Reader) error { - var err error - if done { - return nil - } - - if filepath.Clean(relpath) == path && !info.IsDir() { - if err = ff(relpath, rdr); err != 
nil { - return err - } - done = true - } - return nil - } - - if err := w.walk(f); err != nil { - return errors.Wrapf(err, "find failed: unable to walk") - } - if !done { - return os.ErrNotExist - } - - return nil -} - -type eofReader struct{} - -func (eofReader) Read(_ []byte) (int, error) { - return 0, io.EOF -} - -type pathWalker struct { - root string -} - -// newPathWalker returns a Walker that walks through directories -// starting at the given root path. It does not follow symlinks. -func newPathWalker(root string) walker { - return &pathWalker{root} -} - -func (w *pathWalker) walk(f walkFunc) error { - return filepath.Walk(w.root, func(path string, info os.FileInfo, err error) error { - // MUST check error value, to make sure the `os.FileInfo` is available. - // Otherwise panic risk will exist. - if err != nil { - return errors.Wrap(err, "error walking path") - } - - rel, err := filepath.Rel(w.root, path) - if err != nil { - return errors.Wrap(err, "error walking path") // err from filepath.Walk includes path name - } - - if info.IsDir() { // behave like a tar reader for directories - return f(rel, info, eofReader{}) - } - - file, err := os.Open(path) - if err != nil { - return errors.Wrap(err, "unable to open file") // os.Open includes the path - } - defer file.Close() - - return f(rel, info, file) - }) -} - -func (w *pathWalker) get(desc v1.Descriptor, dst io.Writer) (int64, error) { - name := filepath.Join(w.root, "blobs", string(desc.Digest.Algorithm()), desc.Digest.Hex()) - - info, err := os.Stat(name) - if err != nil { - return 0, err - } - - if info.IsDir() { - return 0, fmt.Errorf("object is dir") - } - - fp, err := os.Open(name) - if err != nil { - return 0, errors.Wrapf(err, "get failed") - } - defer fp.Close() - - nbytes, err := io.Copy(dst, fp) - if err != nil { - return 0, errors.Wrapf(err, "get failed: failed to copy blob to destination") - } - return nbytes, nil -} - -func (w *pathWalker) find(path string, ff findFunc) error { - name := 
filepath.Join(w.root, path) - - info, err := os.Stat(name) - if err != nil { - return err - } - - if info.IsDir() { - return fmt.Errorf("object is dir") - } - - file, err := os.Open(name) - if err != nil { - return errors.Wrap(err, "unable to open file") // os.Open includes the path - } - defer file.Close() - - return ff(name, file) -} - -type zipWalker struct { - fileName string -} - -// newWalkWalker returns a Walker that walks through .zip files. -func newZipWalker(fileName string) walker { - return &zipWalker{fileName} -} - -func (w *zipWalker) walk(f walkFunc) error { - r, err := zip.OpenReader(w.fileName) - if err != nil { - return err - } - defer r.Close() - - for _, file := range r.File { - rc, err := file.Open() - if err != nil { - return err - } - defer rc.Close() - info := file.FileInfo() - if err := f(file.Name, info, rc); err != nil { - return err - } - } - - return nil -} - -func (w *zipWalker) get(desc v1.Descriptor, dst io.Writer) (int64, error) { - var bytes int64 - done := false - - expectedPath := filepath.Join("blobs", string(desc.Digest.Algorithm()), desc.Digest.Hex()) - - f := func(path string, info os.FileInfo, rdr io.Reader) error { - var err error - if done { - return nil - } - - if path == expectedPath && !info.IsDir() { - if bytes, err = io.Copy(dst, rdr); err != nil { - return errors.Wrapf(err, "get failed: failed to copy blob to destination") - } - done = true - } - return nil - } - - if err := w.walk(f); err != nil { - return 0, errors.Wrapf(err, "get failed: unable to walk") - } - if !done { - return 0, os.ErrNotExist - } - - return bytes, nil -} - -func (w *zipWalker) find(path string, ff findFunc) error { - done := false - - f := func(relpath string, info os.FileInfo, rdr io.Reader) error { - var err error - if done { - return nil - } - - if filepath.Clean(relpath) == path && !info.IsDir() { - if err = ff(relpath, rdr); err != nil { - return err - } - done = true - } - return nil - } - - if err := w.walk(f); err != nil { - return 
errors.Wrapf(err, "find failed: unable to walk") - } - if !done { - return os.ErrNotExist - } - - return nil -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/config.go b/vendor/github.com/opencontainers/runtime-tools/generate/config.go index 9421b6d8f1..164fdf1410 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/config.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/config.go @@ -94,7 +94,8 @@ func (g *Generator) initConfigLinuxResourcesBlockIO() { } } -func (g *Generator) initConfigLinuxResourcesCPU() { +// InitConfigLinuxResourcesCPU initializes CPU of Linux resources +func (g *Generator) InitConfigLinuxResourcesCPU() { g.initConfigLinuxResources() if g.Config.Linux.Resources.CPU == nil { g.Config.Linux.Resources.CPU = &rspec.LinuxCPU{} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go index 07b57b44b7..900278f9f8 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go @@ -703,43 +703,43 @@ func (g *Generator) DropLinuxResourcesBlockIOThrottleWriteIOPSDevice(major int64 // SetLinuxResourcesCPUShares sets g.Config.Linux.Resources.CPU.Shares. func (g *Generator) SetLinuxResourcesCPUShares(shares uint64) { - g.initConfigLinuxResourcesCPU() + g.InitConfigLinuxResourcesCPU() g.Config.Linux.Resources.CPU.Shares = &shares } // SetLinuxResourcesCPUQuota sets g.Config.Linux.Resources.CPU.Quota. func (g *Generator) SetLinuxResourcesCPUQuota(quota int64) { - g.initConfigLinuxResourcesCPU() + g.InitConfigLinuxResourcesCPU() g.Config.Linux.Resources.CPU.Quota = "a } // SetLinuxResourcesCPUPeriod sets g.Config.Linux.Resources.CPU.Period. 
func (g *Generator) SetLinuxResourcesCPUPeriod(period uint64) { - g.initConfigLinuxResourcesCPU() + g.InitConfigLinuxResourcesCPU() g.Config.Linux.Resources.CPU.Period = &period } // SetLinuxResourcesCPURealtimeRuntime sets g.Config.Linux.Resources.CPU.RealtimeRuntime. func (g *Generator) SetLinuxResourcesCPURealtimeRuntime(time int64) { - g.initConfigLinuxResourcesCPU() + g.InitConfigLinuxResourcesCPU() g.Config.Linux.Resources.CPU.RealtimeRuntime = &time } // SetLinuxResourcesCPURealtimePeriod sets g.Config.Linux.Resources.CPU.RealtimePeriod. func (g *Generator) SetLinuxResourcesCPURealtimePeriod(period uint64) { - g.initConfigLinuxResourcesCPU() + g.InitConfigLinuxResourcesCPU() g.Config.Linux.Resources.CPU.RealtimePeriod = &period } // SetLinuxResourcesCPUCpus sets g.Config.Linux.Resources.CPU.Cpus. func (g *Generator) SetLinuxResourcesCPUCpus(cpus string) { - g.initConfigLinuxResourcesCPU() + g.InitConfigLinuxResourcesCPU() g.Config.Linux.Resources.CPU.Cpus = cpus } // SetLinuxResourcesCPUMems sets g.Config.Linux.Resources.CPU.Mems. func (g *Generator) SetLinuxResourcesCPUMems(mems string) { - g.initConfigLinuxResourcesCPU() + g.InitConfigLinuxResourcesCPU() g.Config.Linux.Resources.CPU.Mems = mems } diff --git a/vendor/github.com/rootless-containers/proto/COPYING b/vendor/github.com/rootless-containers/proto/COPYING new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/rootless-containers/proto/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.pb.go b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.pb.go new file mode 100644 index 0000000000..4a2f015f3e --- /dev/null +++ b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: rootlesscontainers.proto + +/* +Package rootlesscontainers is a generated protocol buffer package. + +The rootlesscontainers package is maintained at https://rootlesscontaine.rs/ . +If you want to extend the resource definition, please open a PR. 
+ +It is generated from these files: + rootlesscontainers.proto + +It has these top-level messages: + Resource +*/ +package rootlesscontainers + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Resource defines the schema for "user.rootlesscontainers" xattr values. +// The resource can be used as a persistent storage for emulated `chown(2)` syscall. +// Syscall emulators SHOULD try to hide this xattr from the emulated environment. +type Resource struct { + // Zero-value MUST be parsed as a literally zero-value, not "unset". + // To keep both uid and gid unchaged, the entire xattr value SHOULD be removed. + // To keep either one of uid or gid unchaged, 0xFFFFFFFF (in other words, + // `(uint32_t) -1`, see also chown(2)) value SHOULD be set. + // (Because some protobuf bindings cannot distinguish "unset" from zero-value.) 
+ Uid uint32 `protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Resource) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *Resource) GetGid() uint32 { + if m != nil { + return m.Gid + } + return 0 +} + +func init() { + proto.RegisterType((*Resource)(nil), "rootlesscontainers.Resource") +} + +func init() { proto.RegisterFile("rootlesscontainers.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 99 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0xca, 0xcf, 0x2f, + 0xc9, 0x49, 0x2d, 0x2e, 0x4e, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, 0x2a, 0xd6, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc2, 0x94, 0x51, 0xd2, 0xe3, 0xe2, 0x08, 0x4a, 0x2d, 0xce, + 0x2f, 0x2d, 0x4a, 0x4e, 0x15, 0x12, 0xe0, 0x62, 0x2e, 0xcd, 0x4c, 0x91, 0x60, 0x54, 0x60, 0xd4, + 0xe0, 0x0d, 0x02, 0x31, 0x41, 0x22, 0xe9, 0x99, 0x29, 0x12, 0x4c, 0x10, 0x91, 0xf4, 0xcc, 0x94, + 0x24, 0x36, 0xb0, 0x51, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xac, 0x07, 0x53, 0x66, + 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers_generate.go b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers_generate.go new file mode 100644 index 0000000000..405eef0cc1 --- /dev/null +++ b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers_generate.go @@ -0,0 +1,37 @@ +/* + * rootlesscontainers-proto: persistent rootless filesystem emulation + * Copyright (C) 2018 Rootless Containers Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rootlesscontainers + +// Generate everything for our protobuf. +//go:generate protoc --go_out=import_path=rootlesscontainers:. rootlesscontainers.proto + +// Keyname is the official xattr key used to store rootlesscontainers.proto +// blobs, and is the only key we will treat in this special way. +const Keyname = "user.rootlesscontainers" + +// NoopID is the uint32 that represents the "noop" id for uid/gid values. It is +// equal to uint32(-1) but since we cannot write that in Go we have to +// explicitly write the wrapped value. +var NoopID uint32 = 0xFFFFFFFF + +// IsDefault returns whether the given Resource is the default. If a Resource +// is equal to the default Resource then it is not necesary to include it on +// the filesystem. 
+func IsDefault(r Resource) bool { + return r.Uid == NoopID && r.Gid == NoopID +} diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml index a23296a53b..2f19b4a757 100644 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -1,9 +1,7 @@ language: go go: - - 1.6.x - - 1.7.x - - 1.8.x - - tip + - 1.9.x + - 1.10.x env: - GOMAXPROCS=4 GORACE=halt_on_error=1 install: diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index f77819b168..072e99be31 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -241,60 +241,8 @@ func init() { ``` Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). -| Hook | Description | -| ----- | ----------- | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. 
| -| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/) -| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage| -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka | -| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) | -| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | -| 
[Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | -| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics | -| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. 
| -| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) | -| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | -| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | +A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) + #### Level logging @@ -372,6 +320,8 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true`. For Windows, see [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). @@ -495,7 +445,7 @@ logrus.RegisterExitHandler(handler) #### Thread safety -By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. 
Situation when locking is not needed includes: diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 778f4c9f0d..473bd1a0d3 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -48,7 +48,7 @@ type Entry struct { func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, - // Default is three fields, give a little extra room + // Default is five fields, give a little extra room Data: make(Fields, 5), } } @@ -83,14 +83,28 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range fields { data[k] = v } - return &Entry{Logger: entry.Logger, Data: data} + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} } // This function is not declared with a pointer value because otherwise // race conditions will occur when using multiple goroutines func (entry Entry) log(level Level, msg string) { var buffer *bytes.Buffer - entry.Time = time.Now() + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. 
+ if entry.Time.IsZero() { + entry.Time = time.Now() + } + entry.Level = level entry.Message = msg @@ -113,12 +127,10 @@ func (entry Entry) log(level Level, msg string) { } } -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) fireHooks() { +func (entry *Entry) fireHooks() { entry.Logger.mu.Lock() defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, &entry) + err := entry.Logger.Hooks.Fire(entry.Level, entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index 013183edab..eb612a6f3e 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -2,6 +2,7 @@ package logrus import ( "io" + "time" ) var ( @@ -15,9 +16,7 @@ func StandardLogger() *Logger { // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out + std.SetOutput(out) } // SetFormatter sets the standard logger formatter. @@ -72,6 +71,15 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } +// WithTime creats an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -107,7 +115,7 @@ func Panic(args ...interface{}) { std.Panic(args...) } -// Fatal logs a message at level Fatal on the standard logger. +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatal(args ...interface{}) { std.Fatal(args...) 
} @@ -147,7 +155,7 @@ func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) } -// Fatalf logs a message at level Fatal on the standard logger. +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } @@ -187,7 +195,7 @@ func Panicln(args ...interface{}) { std.Panicln(args...) } -// Fatalln logs a message at level Fatal on the standard logger. +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalln(args ...interface{}) { std.Fatalln(args...) } diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go index b183ff5b1d..83c74947be 100644 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -30,16 +30,22 @@ type Formatter interface { // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. 
-func prefixFieldClashes(data Fields) { - if t, ok := data["time"]; ok { - data["fields.time"] = t +func prefixFieldClashes(data Fields, fieldMap FieldMap) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) } - if m, ok := data["msg"]; ok { - data["fields.msg"] = m + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) } - if l, ok := data["level"]; ok { - data["fields.level"] = l + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) } } diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index fb01c1b104..dab17610f1 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -33,6 +33,9 @@ type JSONFormatter struct { // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + // FieldMap allows users to customize the names of keys for default fields. 
// As an example: // formatter := &JSONFormatter{ @@ -58,7 +61,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data[k] = v } } - prefixFieldClashes(data) + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap) timestampFormat := f.TimestampFormat if timestampFormat == "" { diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index fdaf8a6534..342f7977d8 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -5,6 +5,7 @@ import ( "os" "sync" "sync/atomic" + "time" ) type Logger struct { @@ -88,7 +89,7 @@ func (logger *Logger) releaseEntry(entry *Entry) { } // Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() @@ -112,6 +113,13 @@ func (logger *Logger) WithError(err error) *Entry { return entry.WithError(err) } +// Overrides the time of the log entry. 
+func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + func (logger *Logger) Debugf(format string, args ...interface{}) { if logger.level() >= DebugLevel { entry := logger.newEntry() @@ -316,6 +324,12 @@ func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } +func (logger *Logger) SetOutput(out io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = out +} + func (logger *Logger) AddHook(hook Hook) { logger.mu.Lock() defer logger.mu.Unlock() diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index 61b21caea4..3e55040304 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -20,6 +20,7 @@ const ( var ( baseTimestamp time.Time + emptyFieldMap FieldMap ) func init() { @@ -50,12 +51,24 @@ type TextFormatter struct { // be desired. DisableSorting bool + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool // Whether the logger's out is to a terminal isTerminal bool + // FieldMap allows users to customize the names of keys for default fields. 
+ // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + sync.Once } @@ -67,7 +80,8 @@ func (f *TextFormatter) init(entry *Entry) { // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var b *bytes.Buffer + prefixFieldClashes(entry.Data, f.FieldMap) + keys := make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) @@ -76,14 +90,14 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { if !f.DisableSorting { sort.Strings(keys) } + + var b *bytes.Buffer if entry.Buffer != nil { b = entry.Buffer } else { b = &bytes.Buffer{} } - prefixFieldClashes(entry.Data) - f.Do(func() { f.init(entry) }) isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors @@ -96,11 +110,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { f.printColored(b, entry, keys, timestampFormat) } else { if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyTime), entry.Time.Format(timestampFormat)) } - f.appendKeyValue(b, "level", entry.Level.String()) + f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyLevel), entry.Level.String()) if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) + f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyMsg), entry.Message) } for _, key := range keys { f.appendKeyValue(b, key, entry.Data[key]) @@ -124,7 +138,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelColor = blue } - levelText := strings.ToUpper(entry.Level.String())[0:4] + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation { + levelText = levelText[0:4] + } if f.DisableTimestamp { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) diff --git 
a/vendor/github.com/urfave/cli/.flake8 b/vendor/github.com/urfave/cli/.flake8 new file mode 100644 index 0000000000..6deafc2617 --- /dev/null +++ b/vendor/github.com/urfave/cli/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/vendor/github.com/urfave/cli/.gitignore b/vendor/github.com/urfave/cli/.gitignore new file mode 100644 index 0000000000..faf70c4c24 --- /dev/null +++ b/vendor/github.com/urfave/cli/.gitignore @@ -0,0 +1,2 @@ +*.coverprofile +node_modules/ diff --git a/vendor/github.com/urfave/cli/.travis.yml b/vendor/github.com/urfave/cli/.travis.yml new file mode 100644 index 0000000000..cf8d0980dc --- /dev/null +++ b/vendor/github.com/urfave/cli/.travis.yml @@ -0,0 +1,27 @@ +language: go +sudo: false +dist: trusty +osx_image: xcode8.3 +go: 1.8.x + +os: +- linux +- osx + +cache: + directories: + - node_modules + +before_script: +- go get github.com/urfave/gfmrun/... || true +- go get golang.org/x/tools/cmd/goimports +- if [ ! -f node_modules/.bin/markdown-toc ] ; then + npm install markdown-toc ; + fi + +script: +- ./runtests gen +- ./runtests vet +- ./runtests test +- ./runtests gfmrun +- ./runtests toc diff --git a/vendor/github.com/urfave/cli/CHANGELOG.md b/vendor/github.com/urfave/cli/CHANGELOG.md new file mode 100644 index 0000000000..401eae5a2c --- /dev/null +++ b/vendor/github.com/urfave/cli/CHANGELOG.md @@ -0,0 +1,435 @@ +# Change Log + +**ATTN**: This project uses [semantic versioning](http://semver.org/). + +## [Unreleased] + +## 1.20.0 - 2017-08-10 + +### Fixed + +* `HandleExitCoder` is now correctly iterates over all errors in + a `MultiError`. The exit code is the exit code of the last error or `1` if + there are no `ExitCoder`s in the `MultiError`. 
+* Fixed YAML file loading on Windows (previously would fail validate the file path) +* Subcommand `Usage`, `Description`, `ArgsUsage`, `OnUsageError` correctly + propogated +* `ErrWriter` is now passed downwards through command structure to avoid the + need to redefine it +* Pass `Command` context into `OnUsageError` rather than parent context so that + all fields are avaiable +* Errors occuring in `Before` funcs are no longer double printed +* Use `UsageText` in the help templates for commands and subcommands if + defined; otherwise build the usage as before (was previously ignoring this + field) +* `IsSet` and `GlobalIsSet` now correctly return whether a flag is set if + a program calls `Set` or `GlobalSet` directly after flag parsing (would + previously only return `true` if the flag was set during parsing) + +### Changed + +* No longer exit the program on command/subcommand error if the error raised is + not an `OsExiter`. This exiting behavior was introduced in 1.19.0, but was + determined to be a regression in functionality. See [the + PR](https://github.com/urfave/cli/pull/595) for discussion. + +### Added + +* `CommandsByName` type was added to make it easy to sort `Command`s by name, + alphabetically +* `altsrc` now handles loading of string and int arrays from TOML +* Support for definition of custom help templates for `App` via + `CustomAppHelpTemplate` +* Support for arbitrary key/value fields on `App` to be used with + `CustomAppHelpTemplate` via `ExtraInfo` +* `HelpFlag`, `VersionFlag`, and `BashCompletionFlag` changed to explictly be + `cli.Flag`s allowing for the use of custom flags satisfying the `cli.Flag` + interface to be used. + + +## [1.19.1] - 2016-11-21 + +### Fixed + +- Fixes regression introduced in 1.19.0 where using an `ActionFunc` as + the `Action` for a command would cause it to error rather than calling the + function. Should not have a affected declarative cases using `func(c + *cli.Context) err)`. 
+- Shell completion now handles the case where the user specifies + `--generate-bash-completion` immediately after a flag that takes an argument. + Previously it call the application with `--generate-bash-completion` as the + flag value. + +## [1.19.0] - 2016-11-19 +### Added +- `FlagsByName` was added to make it easy to sort flags (e.g. `sort.Sort(cli.FlagsByName(app.Flags))`) +- A `Description` field was added to `App` for a more detailed description of + the application (similar to the existing `Description` field on `Command`) +- Flag type code generation via `go generate` +- Write to stderr and exit 1 if action returns non-nil error +- Added support for TOML to the `altsrc` loader +- `SkipArgReorder` was added to allow users to skip the argument reordering. + This is useful if you want to consider all "flags" after an argument as + arguments rather than flags (the default behavior of the stdlib `flag` + library). This is backported functionality from the [removal of the flag + reordering](https://github.com/urfave/cli/pull/398) in the unreleased version + 2 +- For formatted errors (those implementing `ErrorFormatter`), the errors will + be formatted during output. Compatible with `pkg/errors`. + +### Changed +- Raise minimum tested/supported Go version to 1.2+ + +### Fixed +- Consider empty environment variables as set (previously environment variables + with the equivalent of `""` would be skipped rather than their value used). +- Return an error if the value in a given environment variable cannot be parsed + as the flag type. Previously these errors were silently swallowed. 
+- Print full error when an invalid flag is specified (which includes the invalid flag) +- `App.Writer` defaults to `stdout` when `nil` +- If no action is specified on a command or app, the help is now printed instead of `panic`ing +- `App.Metadata` is initialized automatically now (previously was `nil` unless initialized) +- Correctly show help message if `-h` is provided to a subcommand +- `context.(Global)IsSet` now respects environment variables. Previously it + would return `false` if a flag was specified in the environment rather than + as an argument +- Removed deprecation warnings to STDERR to avoid them leaking to the end-user +- `altsrc`s import paths were updated to use `gopkg.in/urfave/cli.v1`. This + fixes issues that occurred when `gopkg.in/urfave/cli.v1` was imported as well + as `altsrc` where Go would complain that the types didn't match + +## [1.18.1] - 2016-08-28 +### Fixed +- Removed deprecation warnings to STDERR to avoid them leaking to the end-user (backported) + +## [1.18.0] - 2016-06-27 +### Added +- `./runtests` test runner with coverage tracking by default +- testing on OS X +- testing on Windows +- `UintFlag`, `Uint64Flag`, and `Int64Flag` types and supporting code + +### Changed +- Use spaces for alignment in help/usage output instead of tabs, making the + output alignment consistent regardless of tab width + +### Fixed +- Printing of command aliases in help text +- Printing of visible flags for both struct and struct pointer flags +- Display the `help` subcommand when using `CommandCategories` +- No longer swallows `panic`s that occur within the `Action`s themselves when + detecting the signature of the `Action` field + +## [1.17.1] - 2016-08-28 +### Fixed +- Removed deprecation warnings to STDERR to avoid them leaking to the end-user + +## [1.17.0] - 2016-05-09 +### Added +- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc` +- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool` +- Support for 
hiding commands by setting `Hidden: true` -- this will hide the + commands in help output + +### Changed +- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer + quoted in help text output. +- All flag types now include `(default: {value})` strings following usage when a + default value can be (reasonably) detected. +- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent + with non-slice flag types +- Apps now exit with a code of 3 if an unknown subcommand is specified + (previously they printed "No help topic for...", but still exited 0. This + makes it easier to script around apps built using `cli` since they can trust + that a 0 exit code indicated a successful execution. +- cleanups based on [Go Report Card + feedback](https://goreportcard.com/report/github.com/urfave/cli) + +## [1.16.1] - 2016-08-28 +### Fixed +- Removed deprecation warnings to STDERR to avoid them leaking to the end-user + +## [1.16.0] - 2016-05-02 +### Added +- `Hidden` field on all flag struct types to omit from generated help text + +### Changed +- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from +generated help text via the `Hidden` field + +### Fixed +- handling of error values in `HandleAction` and `HandleExitCoder` + +## [1.15.0] - 2016-04-30 +### Added +- This file! +- Support for placeholders in flag usage strings +- `App.Metadata` map for arbitrary data/state management +- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after +parsing. +- Support for nested lookup of dot-delimited keys in structures loaded from +YAML. + +### Changed +- The `App.Action` and `Command.Action` now prefer a return signature of +`func(*cli.Context) error`, as defined by `cli.ActionFunc`. 
If a non-nil +`error` is returned, there may be two outcomes: + - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called + automatically + - Else the error is bubbled up and returned from `App.Run` +- Specifying an `Action` with the legacy return signature of +`func(*cli.Context)` will produce a deprecation message to stderr +- Specifying an `Action` that is not a `func` type will produce a non-zero exit +from `App.Run` +- Specifying an `Action` func that has an invalid (input) signature will +produce a non-zero exit from `App.Run` + +### Deprecated +- +`cli.App.RunAndExitOnError`, which should now be done by returning an error +that fulfills `cli.ExitCoder` to `cli.App.Run`. +- the legacy signature for +`cli.App.Action` of `func(*cli.Context)`, which should now have a return +signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`. + +### Fixed +- Added missing `*cli.Context.GlobalFloat64` method + +## [1.14.0] - 2016-04-03 (backfilled 2016-04-25) +### Added +- Codebeat badge +- Support for categorization via `CategorizedHelp` and `Categories` on app. + +### Changed +- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`. + +### Fixed +- Ensure version is not shown in help text when `HideVersion` set. + +## [1.13.0] - 2016-03-06 (backfilled 2016-04-25) +### Added +- YAML file input support. +- `NArg` method on context. + +## [1.12.0] - 2016-02-17 (backfilled 2016-04-25) +### Added +- Custom usage error handling. +- Custom text support in `USAGE` section of help output. +- Improved help messages for empty strings. +- AppVeyor CI configuration. + +### Changed +- Removed `panic` from default help printer func. +- De-duping and optimizations. + +### Fixed +- Correctly handle `Before`/`After` at command level when no subcommands. +- Case of literal `-` argument causing flag reordering. +- Environment variable hints on Windows. +- Docs updates. 
+ +## [1.11.1] - 2015-12-21 (backfilled 2016-04-25) +### Changed +- Use `path.Base` in `Name` and `HelpName` +- Export `GetName` on flag types. + +### Fixed +- Flag parsing when skipping is enabled. +- Test output cleanup. +- Move completion check to account for empty input case. + +## [1.11.0] - 2015-11-15 (backfilled 2016-04-25) +### Added +- Destination scan support for flags. +- Testing against `tip` in Travis CI config. + +### Changed +- Go version in Travis CI config. + +### Fixed +- Removed redundant tests. +- Use correct example naming in tests. + +## [1.10.2] - 2015-10-29 (backfilled 2016-04-25) +### Fixed +- Remove unused var in bash completion. + +## [1.10.1] - 2015-10-21 (backfilled 2016-04-25) +### Added +- Coverage and reference logos in README. + +### Fixed +- Use specified values in help and version parsing. +- Only display app version and help message once. + +## [1.10.0] - 2015-10-06 (backfilled 2016-04-25) +### Added +- More tests for existing functionality. +- `ArgsUsage` at app and command level for help text flexibility. + +### Fixed +- Honor `HideHelp` and `HideVersion` in `App.Run`. +- Remove juvenile word from README. + +## [1.9.0] - 2015-09-08 (backfilled 2016-04-25) +### Added +- `FullName` on command with accompanying help output update. +- Set default `$PROG` in bash completion. + +### Changed +- Docs formatting. + +### Fixed +- Removed self-referential imports in tests. + +## [1.8.0] - 2015-06-30 (backfilled 2016-04-25) +### Added +- Support for `Copyright` at app level. +- `Parent` func at context level to walk up context lineage. + +### Fixed +- Global flag processing at top level. + +## [1.7.1] - 2015-06-11 (backfilled 2016-04-25) +### Added +- Aggregate errors from `Before`/`After` funcs. +- Doc comments on flag structs. +- Include non-global flags when checking version and help. +- Travis CI config updates. + +### Fixed +- Ensure slice type flags have non-nil values. +- Collect global flags from the full command hierarchy. 
+- Docs prose. + +## [1.7.0] - 2015-05-03 (backfilled 2016-04-25) +### Changed +- `HelpPrinter` signature includes output writer. + +### Fixed +- Specify go 1.1+ in docs. +- Set `Writer` when running command as app. + +## [1.6.0] - 2015-03-23 (backfilled 2016-04-25) +### Added +- Multiple author support. +- `NumFlags` at context level. +- `Aliases` at command level. + +### Deprecated +- `ShortName` at command level. + +### Fixed +- Subcommand help output. +- Backward compatible support for deprecated `Author` and `Email` fields. +- Docs regarding `Names`/`Aliases`. + +## [1.5.0] - 2015-02-20 (backfilled 2016-04-25) +### Added +- `After` hook func support at app and command level. + +### Fixed +- Use parsed context when running command as subcommand. +- Docs prose. + +## [1.4.1] - 2015-01-09 (backfilled 2016-04-25) +### Added +- Support for hiding `-h / --help` flags, but not `help` subcommand. +- Stop flag parsing after `--`. + +### Fixed +- Help text for generic flags to specify single value. +- Use double quotes in output for defaults. +- Use `ParseInt` instead of `ParseUint` for int environment var values. +- Use `0` as base when parsing int environment var values. + +## [1.4.0] - 2014-12-12 (backfilled 2016-04-25) +### Added +- Support for environment variable lookup "cascade". +- Support for `Stdout` on app for output redirection. + +### Fixed +- Print command help instead of app help in `ShowCommandHelp`. + +## [1.3.1] - 2014-11-13 (backfilled 2016-04-25) +### Added +- Docs and example code updates. + +### Changed +- Default `-v / --version` flag made optional. + +## [1.3.0] - 2014-08-10 (backfilled 2016-04-25) +### Added +- `FlagNames` at context level. +- Exposed `VersionPrinter` var for more control over version output. +- Zsh completion hook. +- `AUTHOR` section in default app help template. +- Contribution guidelines. +- `DurationFlag` type. + +## [1.2.0] - 2014-08-02 +### Added +- Support for environment variable defaults on flags plus tests. 
+ +## [1.1.0] - 2014-07-15 +### Added +- Bash completion. +- Optional hiding of built-in help command. +- Optional skipping of flag parsing at command level. +- `Author`, `Email`, and `Compiled` metadata on app. +- `Before` hook func support at app and command level. +- `CommandNotFound` func support at app level. +- Command reference available on context. +- `GenericFlag` type. +- `Float64Flag` type. +- `BoolTFlag` type. +- `IsSet` flag helper on context. +- More flag lookup funcs at context level. +- More tests & docs. + +### Changed +- Help template updates to account for presence/absence of flags. +- Separated subcommand help template. +- Exposed `HelpPrinter` var for more control over help output. + +## [1.0.0] - 2013-11-01 +### Added +- `help` flag in default app flag set and each command flag set. +- Custom handling of argument parsing errors. +- Command lookup by name at app level. +- `StringSliceFlag` type and supporting `StringSlice` type. +- `IntSliceFlag` type and supporting `IntSlice` type. +- Slice type flag lookups by name at context level. +- Export of app and command help functions. +- More tests & docs. + +## 0.1.0 - 2013-07-22 +### Added +- Initial implementation. 
+ +[Unreleased]: https://github.com/urfave/cli/compare/v1.18.0...HEAD +[1.18.0]: https://github.com/urfave/cli/compare/v1.17.0...v1.18.0 +[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0 +[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0 +[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0 +[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0 +[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0 +[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0 +[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1 +[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0 +[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2 +[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1 +[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0 +[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0 +[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0 +[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1 +[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0 +[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0 +[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0 +[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0 diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/urfave/cli/LICENSE new file mode 100644 index 0000000000..42a597e29b --- /dev/null +++ b/vendor/github.com/urfave/cli/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Jeremy Saenz & Contributors + +Permission is 
hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md new file mode 100644 index 0000000000..2bbbd8ea97 --- /dev/null +++ b/vendor/github.com/urfave/cli/README.md @@ -0,0 +1,1381 @@ +cli +=== + +[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) +[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli) +[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) / +[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc) + +**Notice:** This is the library formerly known as +`github.com/codegangsta/cli` -- Github will automatically redirect requests +to this repository, but we recommend updating your references for clarity. + +cli is a simple, fast, and fun package for building command line apps in Go. The +goal is to enable developers to write fast and distributable command line +applications in an expressive way. 
+ + + +- [Overview](#overview) +- [Installation](#installation) + * [Supported platforms](#supported-platforms) + * [Using the `v2` branch](#using-the-v2-branch) + * [Pinning to the `v1` releases](#pinning-to-the-v1-releases) +- [Getting Started](#getting-started) +- [Examples](#examples) + * [Arguments](#arguments) + * [Flags](#flags) + + [Placeholder Values](#placeholder-values) + + [Alternate Names](#alternate-names) + + [Ordering](#ordering) + + [Values from the Environment](#values-from-the-environment) + + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others) + * [Subcommands](#subcommands) + * [Subcommands categories](#subcommands-categories) + * [Exit code](#exit-code) + * [Bash Completion](#bash-completion) + + [Enabling](#enabling) + + [Distribution](#distribution) + + [Customization](#customization) + * [Generated Help Text](#generated-help-text) + + [Customization](#customization-1) + * [Version Flag](#version-flag) + + [Customization](#customization-2) + + [Full API Example](#full-api-example) +- [Contribution Guidelines](#contribution-guidelines) + + + +## Overview + +Command line apps are usually so tiny that there is absolutely no reason why +your code should *not* be self-documenting. Things like generating help text and +parsing command flags/options should not hinder productivity when writing a +command line app. + +**This is where cli comes into play.** cli makes command line programming fun, +organized, and expressive! + +## Installation + +Make sure you have a working Go environment. Go version 1.2+ is supported. [See +the install instructions for Go](http://golang.org/doc/install.html). 
+ +To install cli, simply run: +``` +$ go get github.com/urfave/cli +``` + +Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can +be easily used: +``` +export PATH=$PATH:$GOPATH/bin +``` + +### Supported platforms + +cli is tested against multiple versions of Go on Linux, and against the latest +released version of Go on OS X and Windows. For full details, see +[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). + +### Using the `v2` branch + +**Warning**: The `v2` branch is currently unreleased and considered unstable. + +There is currently a long-lived branch named `v2` that is intended to land as +the new `master` branch once development there has settled down. The current +`master` branch (mirrored as `v1`) is being manually merged into `v2` on +an irregular human-based schedule, but generally if one wants to "upgrade" to +`v2` *now* and accept the volatility (read: "awesomeness") that comes along with +that, please use whatever version pinning of your preference, such as via +`gopkg.in`: + +``` +$ go get gopkg.in/urfave/cli.v2 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v2" // imports as package "cli" +) +... +``` + +### Pinning to the `v1` releases + +Similarly to the section above describing use of the `v2` branch, if one wants +to avoid any unexpected compatibility pains once `v2` becomes `master`, then +pinning to `v1` is an acceptable option, e.g.: + +``` +$ go get gopkg.in/urfave/cli.v1 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v1" // imports as package "cli" +) +... +``` + +This will pull the latest tagged `v1` release (e.g. `v1.18.1` at the time of writing). + +## Getting Started + +One of the philosophies behind cli is that an API should be playful and full of +discovery. So a cli app can be as little as one line of code in `main()`. 
+ + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.NewApp().Run(os.Args) +} +``` + +This app will run and show help text, but is not very useful. Let's give an +action to execute and some help documentation: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Action = func(c *cli.Context) error { + fmt.Println("boom! I say!") + return nil + } + + app.Run(os.Args) +} +``` + +Running this already gives you a ton of functionality, plus support for things +like subcommands and flags, which are covered below. + +## Examples + +Being a programmer can be a lonely job. Thankfully by the power of automation +that is not the case! Let's create a greeter app to fend off our demons of +loneliness! + +Start by creating a directory named `greet`, and within it, add a file, +`greet.go` with the following code in it: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "greet" + app.Usage = "fight the loneliness!" + app.Action = func(c *cli.Context) error { + fmt.Println("Hello friend!") + return nil + } + + app.Run(os.Args) +} +``` + +Install our command to the `$GOPATH/bin` directory: + +``` +$ go install +``` + +Finally run our new command: + +``` +$ greet +Hello friend! +``` + +cli also generates neat help text: + +``` +$ greet help +NAME: + greet - fight the loneliness! + +USAGE: + greet [global options] command [command options] [arguments...] 
+ +VERSION: + 0.0.0 + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS + --version Shows version information +``` + +### Arguments + +You can lookup arguments by calling the `Args` function on `cli.Context`, e.g.: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Action = func(c *cli.Context) error { + fmt.Printf("Hello %q", c.Args().Get(0)) + return nil + } + + app.Run(os.Args) +} +``` + +### Flags + +Setting and querying flags is simple. + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + }, + } + + app.Action = func(c *cli.Context) error { + name := "Nefertiti" + if c.NArg() > 0 { + name = c.Args().Get(0) + } + if c.String("lang") == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil + } + + app.Run(os.Args) +} +``` + +You can also set a destination variable for a flag, to which the content will be +scanned. + + +``` go +package main + +import ( + "os" + "fmt" + + "github.com/urfave/cli" +) + +func main() { + var language string + + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + Destination: &language, + }, + } + + app.Action = func(c *cli.Context) error { + name := "someone" + if c.NArg() > 0 { + name = c.Args()[0] + } + if language == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil + } + + app.Run(os.Args) +} +``` + +See full list of flags at http://godoc.org/github.com/urfave/cli + +#### Placeholder Values + +Sometimes it's useful to specify a flag's value within the usage string itself. +Such placeholders are indicated with back quotes. 
+ +For example this: + + +```go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "config, c", + Usage: "Load configuration from `FILE`", + }, + } + + app.Run(os.Args) +} +``` + +Will result in help output like: + +``` +--config FILE, -c FILE Load configuration from FILE +``` + +Note that only the first placeholder is used. Subsequent back-quoted words will +be left as-is. + +#### Alternate Names + +You can set alternate (or short) names for flags by providing a comma-delimited +list for the `Name`. e.g. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "language for the greeting", + }, + } + + app.Run(os.Args) +} +``` + +That flag can then be set with `--lang spanish` or `-l spanish`. Note that +giving two different forms of the same flag in the same command invocation is an +error. + +#### Ordering + +Flags for the application and commands are shown in the order they are defined. +However, it's possible to sort them from outside this library by using `FlagsByName` +or `CommandsByName` with `sort`. 
+ +For example this: + + +``` go +package main + +import ( + "os" + "sort" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "Language for the greeting", + }, + cli.StringFlag{ + Name: "config, c", + Usage: "Load configuration from `FILE`", + }, + } + + app.Commands = []cli.Command{ + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + return nil + }, + }, + { + Name: "add", + Aliases: []string{"a"}, + Usage: "add a task to the list", + Action: func(c *cli.Context) error { + return nil + }, + }, + } + + sort.Sort(cli.FlagsByName(app.Flags)) + sort.Sort(cli.CommandsByName(app.Commands)) + + app.Run(os.Args) +} +``` + +Will result in help output like: + +``` +--config FILE, -c FILE Load configuration from FILE +--lang value, -l value Language for the greeting (default: "english") +``` + +#### Values from the Environment + +You can also have the default value set from the environment via `EnvVar`. e.g. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "language for the greeting", + EnvVar: "APP_LANG", + }, + } + + app.Run(os.Args) +} +``` + +The `EnvVar` may also be given as a comma-delimited "cascade", where the first +environment variable that resolves is used as the default. 
+
+
+``` go
+package main
+
+import (
+  "os"
+
+  "github.com/urfave/cli"
+)
+
+func main() {
+  app := cli.NewApp()
+
+  app.Flags = []cli.Flag {
+    cli.StringFlag{
+      Name: "lang, l",
+      Value: "english",
+      Usage: "language for the greeting",
+      EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
+    },
+  }
+
+  app.Run(os.Args)
+}
+```
+
+#### Values from alternate input sources (YAML, TOML, and others)
+
+There is a separate package altsrc that adds support for getting flag values
+from other file input sources.
+
+Currently supported input source formats:
+* YAML
+* TOML
+
+In order to get values for a flag from an alternate input source the following
+code would be added to wrap an existing cli.Flag like below:
+
+``` go
+  altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
+```
+
+Initialization must also occur for these flags. Below is an example of
+initializing flags with data from a yaml file.
+
+``` go
+  command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
+```
+
+The code above will use the "load" string as a flag name to get the file name of
+a yaml file from the cli.Context. It will then use that file name to initialize
+the yaml input source for any flags that are defined on that command. As a note,
+the "load" flag used would also have to be defined on the command flags in order
+for this code snippet to work.
+
+Currently only the above-specified formats are supported, but developers can
+add support for other input sources by implementing the
+altsrc.InputSourceContext for their given sources.
+ +Here is a more complete sample of a command using YAML support: + + +``` go +package notmain + +import ( + "fmt" + "os" + + "github.com/urfave/cli" + "github.com/urfave/cli/altsrc" +) + +func main() { + app := cli.NewApp() + + flags := []cli.Flag{ + altsrc.NewIntFlag(cli.IntFlag{Name: "test"}), + cli.StringFlag{Name: "load"}, + } + + app.Action = func(c *cli.Context) error { + fmt.Println("yaml ist rad") + return nil + } + + app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load")) + app.Flags = flags + + app.Run(os.Args) +} +``` + +### Subcommands + +Subcommands can be defined for a more git-like command line app. + + +```go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Commands = []cli.Command{ + { + Name: "add", + Aliases: []string{"a"}, + Usage: "add a task to the list", + Action: func(c *cli.Context) error { + fmt.Println("added task: ", c.Args().First()) + return nil + }, + }, + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil + }, + }, + { + Name: "template", + Aliases: []string{"t"}, + Usage: "options for task templates", + Subcommands: []cli.Command{ + { + Name: "add", + Usage: "add a new template", + Action: func(c *cli.Context) error { + fmt.Println("new task template: ", c.Args().First()) + return nil + }, + }, + { + Name: "remove", + Usage: "remove an existing template", + Action: func(c *cli.Context) error { + fmt.Println("removed task template: ", c.Args().First()) + return nil + }, + }, + }, + }, + } + + app.Run(os.Args) +} +``` + +### Subcommands categories + +For additional organization in apps that have many subcommands, you can +associate a category for each command to group them together in the help +output. + +E.g. 
+ +```go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Commands = []cli.Command{ + { + Name: "noop", + }, + { + Name: "add", + Category: "template", + }, + { + Name: "remove", + Category: "template", + }, + } + + app.Run(os.Args) +} +``` + +Will include: + +``` +COMMANDS: + noop + + Template actions: + add + remove +``` + +### Exit code + +Calling `App.Run` will not automatically call `os.Exit`, which means that by +default the exit code will "fall through" to being `0`. An explicit exit code +may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a +`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Flags = []cli.Flag{ + cli.BoolTFlag{ + Name: "ginger-crouton", + Usage: "is it in the soup?", + }, + } + app.Action = func(ctx *cli.Context) error { + if !ctx.Bool("ginger-crouton") { + return cli.NewExitError("it is not in the soup", 86) + } + return nil + } + + app.Run(os.Args) +} +``` + +### Bash Completion + +You can enable completion commands by setting the `EnableBashCompletion` +flag on the `App` object. By default, this setting will only auto-complete to +show an app's subcommands, but you can write your own completion methods for +the App or its subcommands. 
+ + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"} + + app := cli.NewApp() + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil + }, + BashComplete: func(c *cli.Context) { + // This will complete if no args are passed + if c.NArg() > 0 { + return + } + for _, t := range tasks { + fmt.Println(t) + } + }, + }, + } + + app.Run(os.Args) +} +``` + +#### Enabling + +Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while +setting the `PROG` variable to the name of your program: + +`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` + +#### Distribution + +Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename +it to the name of the program you wish to add autocomplete support for (or +automatically install it there if you are distributing a package). Don't forget +to source the file to make it active in the current shell. + +``` +sudo cp src/bash_autocomplete /etc/bash_completion.d/ +source /etc/bash_completion.d/ +``` + +Alternatively, you can just document that users should source the generic +`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set +to the name of their program (as above). 
+ +#### Customization + +The default bash completion flag (`--generate-bash-completion`) is defined as +`cli.BashCompletionFlag`, and may be redefined if desired, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.BashCompletionFlag = cli.BoolFlag{ + Name: "compgen", + Hidden: true, + } + + app := cli.NewApp() + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "wat", + }, + } + app.Run(os.Args) +} +``` + +### Generated Help Text + +The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked +by the cli internals in order to print generated help text for the app, command, +or subcommand, and break execution. + +#### Customization + +All of the help text generation may be customized, and at multiple levels. The +templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and +`SubcommandHelpTemplate` which may be reassigned or augmented, and full override +is possible by assigning a compatible func to the `cli.HelpPrinter` variable, +e.g.: + + +``` go +package main + +import ( + "fmt" + "io" + "os" + + "github.com/urfave/cli" +) + +func main() { + // EXAMPLE: Append to an existing template + cli.AppHelpTemplate = fmt.Sprintf(`%s + +WEBSITE: http://awesometown.example.com + +SUPPORT: support@awesometown.example.com + +`, cli.AppHelpTemplate) + + // EXAMPLE: Override a template + cli.AppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{if len .Authors}} +AUTHOR: + {{range .Authors}}{{ . 
}}{{end}} + {{end}}{{if .Commands}} +COMMANDS: +{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}} +GLOBAL OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright }} +COPYRIGHT: + {{.Copyright}} + {{end}}{{if .Version}} +VERSION: + {{.Version}} + {{end}} +` + + // EXAMPLE: Replace the `HelpPrinter` func + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Println("Ha HA. I pwnd the help!!1") + } + + cli.NewApp().Run(os.Args) +} +``` + +The default flag may be customized to something other than `-h/--help` by +setting `cli.HelpFlag`, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.HelpFlag = cli.BoolFlag{ + Name: "halp, haaaaalp", + Usage: "HALP", + EnvVar: "SHOW_HALP,HALPPLZ", + } + + cli.NewApp().Run(os.Args) +} +``` + +### Version Flag + +The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which +is checked by the cli internals in order to print the `App.Version` via +`cli.VersionPrinter` and break execution. 
+ +#### Customization + +The default flag may be customized to something other than `-v/--version` by +setting `cli.VersionFlag`, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.VersionFlag = cli.BoolFlag{ + Name: "print-version, V", + Usage: "print only the version", + } + + app := cli.NewApp() + app.Name = "partay" + app.Version = "19.99.0" + app.Run(os.Args) +} +``` + +Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +var ( + Revision = "fafafaf" +) + +func main() { + cli.VersionPrinter = func(c *cli.Context) { + fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision) + } + + app := cli.NewApp() + app.Name = "partay" + app.Version = "19.99.0" + app.Run(os.Args) +} +``` + +#### Full API Example + +**Notice**: This is a contrived (functioning) example meant strictly for API +demonstration purposes. Use of one's imagination is encouraged. 
+ + +``` go +package main + +import ( + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/urfave/cli" +) + +func init() { + cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n" + cli.CommandHelpTemplate += "\nYMMV\n" + cli.SubcommandHelpTemplate += "\nor something\n" + + cli.HelpFlag = cli.BoolFlag{Name: "halp"} + cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true} + cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"} + + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Fprintf(w, "best of luck to you\n") + } + cli.VersionPrinter = func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version) + } + cli.OsExiter = func(c int) { + fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c) + } + cli.ErrWriter = ioutil.Discard + cli.FlagStringer = func(fl cli.Flag) string { + return fmt.Sprintf("\t\t%s", fl.GetName()) + } +} + +type hexWriter struct{} + +func (w *hexWriter) Write(p []byte) (int, error) { + for _, b := range p { + fmt.Printf("%x", b) + } + fmt.Printf("\n") + + return len(p), nil +} + +type genericType struct{ + s string +} + +func (g *genericType) Set(value string) error { + g.s = value + return nil +} + +func (g *genericType) String() string { + return g.s +} + +func main() { + app := cli.NewApp() + app.Name = "kənˈtrīv" + app.Version = "19.99.0" + app.Compiled = time.Now() + app.Authors = []cli.Author{ + cli.Author{ + Name: "Example Human", + Email: "human@example.com", + }, + } + app.Copyright = "(c) 1999 Serious Enterprise" + app.HelpName = "contrive" + app.Usage = "demonstrate available API" + app.UsageText = "contrive - demonstrating the available API" + app.ArgsUsage = "[args and such]" + app.Commands = []cli.Command{ + cli.Command{ + Name: "doo", + Aliases: []string{"do"}, + Category: "motion", + Usage: "do the doo", + UsageText: "doo - does the dooing", + Description: "no really, there is a lot of dooing to be done", + ArgsUsage: "[arrgh]", + 
Flags: []cli.Flag{ + cli.BoolFlag{Name: "forever, forevvarr"}, + }, + Subcommands: cli.Commands{ + cli.Command{ + Name: "wop", + Action: wopAction, + }, + }, + SkipFlagParsing: false, + HideHelp: false, + Hidden: false, + HelpName: "doo!", + BashComplete: func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "--better\n") + }, + Before: func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "brace for impact\n") + return nil + }, + After: func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "did we lose anyone?\n") + return nil + }, + Action: func(c *cli.Context) error { + c.Command.FullName() + c.Command.HasName("wop") + c.Command.Names() + c.Command.VisibleFlags() + fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n") + if c.Bool("forever") { + c.Command.Run(c) + } + return nil + }, + OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error { + fmt.Fprintf(c.App.Writer, "for shame\n") + return err + }, + }, + } + app.Flags = []cli.Flag{ + cli.BoolFlag{Name: "fancy"}, + cli.BoolTFlag{Name: "fancier"}, + cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3}, + cli.Float64Flag{Name: "howmuch"}, + cli.GenericFlag{Name: "wat", Value: &genericType{}}, + cli.Int64Flag{Name: "longdistance"}, + cli.Int64SliceFlag{Name: "intervals"}, + cli.IntFlag{Name: "distance"}, + cli.IntSliceFlag{Name: "times"}, + cli.StringFlag{Name: "dance-move, d"}, + cli.StringSliceFlag{Name: "names, N"}, + cli.UintFlag{Name: "age"}, + cli.Uint64Flag{Name: "bigage"}, + } + app.EnableBashCompletion = true + app.HideHelp = false + app.HideVersion = false + app.BashComplete = func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n") + } + app.Before = func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n") + return nil + } + app.After = func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "Phew!\n") + return nil + } + app.CommandNotFound = func(c *cli.Context, command string) { + fmt.Fprintf(c.App.Writer, 
"Thar be no %q here.\n", command) + } + app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error { + if isSubcommand { + return err + } + + fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err) + return nil + } + app.Action = func(c *cli.Context) error { + cli.DefaultAppComplete(c) + cli.HandleExitCoder(errors.New("not an exit coder, though")) + cli.ShowAppHelp(c) + cli.ShowCommandCompletions(c, "nope") + cli.ShowCommandHelp(c, "also-nope") + cli.ShowCompletions(c) + cli.ShowSubcommandHelp(c) + cli.ShowVersion(c) + + categories := c.App.Categories() + categories.AddCommand("sounds", cli.Command{ + Name: "bloop", + }) + + for _, category := range c.App.Categories() { + fmt.Fprintf(c.App.Writer, "%s\n", category.Name) + fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands) + fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands()) + } + + fmt.Printf("%#v\n", c.App.Command("doo")) + if c.Bool("infinite") { + c.App.Run([]string{"app", "doo", "wop"}) + } + + if c.Bool("forevar") { + c.App.RunAsSubcommand(c) + } + c.App.Setup() + fmt.Printf("%#v\n", c.App.VisibleCategories()) + fmt.Printf("%#v\n", c.App.VisibleCommands()) + fmt.Printf("%#v\n", c.App.VisibleFlags()) + + fmt.Printf("%#v\n", c.Args().First()) + if len(c.Args()) > 0 { + fmt.Printf("%#v\n", c.Args()[1]) + } + fmt.Printf("%#v\n", c.Args().Present()) + fmt.Printf("%#v\n", c.Args().Tail()) + + set := flag.NewFlagSet("contrive", 0) + nc := cli.NewContext(c.App, set, c) + + fmt.Printf("%#v\n", nc.Args()) + fmt.Printf("%#v\n", nc.Bool("nope")) + fmt.Printf("%#v\n", nc.BoolT("nerp")) + fmt.Printf("%#v\n", nc.Duration("howlong")) + fmt.Printf("%#v\n", nc.Float64("hay")) + fmt.Printf("%#v\n", nc.Generic("bloop")) + fmt.Printf("%#v\n", nc.Int64("bonk")) + fmt.Printf("%#v\n", nc.Int64Slice("burnks")) + fmt.Printf("%#v\n", nc.Int("bips")) + fmt.Printf("%#v\n", nc.IntSlice("blups")) + fmt.Printf("%#v\n", nc.String("snurt")) + fmt.Printf("%#v\n", nc.StringSlice("snurkles")) + fmt.Printf("%#v\n", 
nc.Uint("flub")) + fmt.Printf("%#v\n", nc.Uint64("florb")) + fmt.Printf("%#v\n", nc.GlobalBool("global-nope")) + fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp")) + fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong")) + fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay")) + fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop")) + fmt.Printf("%#v\n", nc.GlobalInt("global-bips")) + fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups")) + fmt.Printf("%#v\n", nc.GlobalString("global-snurt")) + fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles")) + + fmt.Printf("%#v\n", nc.FlagNames()) + fmt.Printf("%#v\n", nc.GlobalFlagNames()) + fmt.Printf("%#v\n", nc.GlobalIsSet("wat")) + fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope")) + fmt.Printf("%#v\n", nc.NArg()) + fmt.Printf("%#v\n", nc.NumFlags()) + fmt.Printf("%#v\n", nc.Parent()) + + nc.Set("wat", "also-nope") + + ec := cli.NewExitError("ohwell", 86) + fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode()) + fmt.Printf("made it!\n") + return ec + } + + if os.Getenv("HEXY") != "" { + app.Writer = &hexWriter{} + app.ErrWriter = &hexWriter{} + } + + app.Metadata = map[string]interface{}{ + "layers": "many", + "explicable": false, + "whatever-values": 19.99, + } + + app.Run(os.Args) +} + +func wopAction(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n") + return nil +} +``` + +## Contribution Guidelines + +Feel free to put up a pull request to fix a bug or maybe add a feature. I will +give it a code review and make sure that it does not break backwards +compatibility. If I or any other collaborators agree that it is in line with +the vision of the project, we will work with you to get the code into +a mergeable state and merge it into the master branch. + +If you have contributed something significant to the project, we will most +likely add you as a collaborator. As a collaborator you are given the ability +to merge others pull requests. 
It is very important that new code does not +break existing code, so be careful about what code you do choose to merge. + +If you feel like you have contributed to the project but have not yet been +added as a collaborator, we probably forgot to add you, please open an issue. diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go new file mode 100644 index 0000000000..51fc45d878 --- /dev/null +++ b/vendor/github.com/urfave/cli/app.go @@ -0,0 +1,497 @@ +package cli + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "time" +) + +var ( + changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) + + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + + errInvalidActionType = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) +) + +// App is the main structure of a cli application. It is recommended that +// an app be created with the cli.NewApp() function +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Description of the program argument format. 
+ ArgsUsage string + // Version of the program + Version string + // Description of the program + Description string + // List of commands to execute + Commands []Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command + HideHelp bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // Populate on app startup, only gettable through method Categories() + categories CommandCategories + // An action to execute when the bash-completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + + // The action to execute when no subcommands are specified + // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}` + // *Note*: support for the deprecated `Action` signature will be removed in a future version + Action interface{} + + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if an usage error occurs + OnUsageError OnUsageErrorFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []Author + // Copyright of the binary if any + Copyright string + // Name of Author (Note: Use App.Authors, this is deprecated) + Author string + // Email of Author (Note: Use App.Authors, this is deprecated) + Email string + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. 
+ ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomAppHelpTemplate string + + didSetup bool +} + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. +func NewApp() *App { + return &App{ + Name: filepath.Base(os.Args[0]), + HelpName: filepath.Base(os.Args[0]), + Usage: "A new cli application", + UsageText: "", + Version: "0.0.0", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Writer: os.Stdout, + } +} + +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. 
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Author != "" || a.Email != "" { + a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = CommandCategories{} + for _, command := range a.Commands { + a.categories = a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } + + if a.Writer == nil { + a.Writer = os.Stdout + } +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. 
this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + + // parse flags + set, err := flagSet(a.Name, a.Flags) + if err != nil { + return err + } + + set.SetOutput(ioutil.Discard) + err = set.Parse(arguments[1:]) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, nil) + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + ShowAppHelp(context) + return nerr + } + context.shellComplete = shellComplete + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err := a.OnUsageError(context, err, false) + HandleExitCoder(err) + return err + } + fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + ShowAppHelp(context) + return nil + } + + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) + return nil + } + + if a.After != nil { + defer func() { + if afterErr := a.After(context); afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + ShowAppHelp(context) + HandleExitCoder(beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + // Run default Action + err = HandleAction(a.Action, context) + + HandleExitCoder(err) + return err +} + +// RunAndExitOnError calls .Run() and exits non-zero if an error was returned +// +// Deprecated: instead you should return an error that fulfills 
cli.ExitCoder +// to cli.App.Run. This will cause the application to exit with the given eror +// code in the cli.ExitCoder +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + fmt.Fprintln(a.errWriter(), err) + OsExiter(1) + } +} + +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + // append help to commands + if len(a.Commands) > 0 { + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + // parse flags + set, err := flagSet(a.Name, a.Flags) + if err != nil { + return err + } + + set.SetOutput(ioutil.Discard) + err = set.Parse(ctx.Args().Tail()) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, ctx) + + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + fmt.Fprintln(a.Writer) + if len(a.Commands) > 0 { + ShowSubcommandHelp(context) + } else { + ShowCommandHelp(ctx, context.Args().First()) + } + return nerr + } + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err = a.OnUsageError(context, err, true) + HandleExitCoder(err) + return err + } + fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + ShowSubcommandHelp(context) + return err + } + + if len(a.Commands) > 0 { + if checkSubcommandHelp(context) { + return nil + } + } else { + if checkCommandHelp(ctx, context.Args().First()) { + return nil + } + } + + if a.After != nil { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + HandleExitCoder(err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } 
+ }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + HandleExitCoder(beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + err = HandleAction(a.Action, context) + + HandleExitCoder(err) + return err +} + +// Command returns the named command on App. Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return &c + } + } + + return nil +} + +// Categories returns a slice containing all the categories with the commands they contain +func (a *App) Categories() CommandCategories { + return a.categories +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []*CommandCategory { + ret := []*CommandCategory{} + for _, category := range a.categories { + if visible := func() *CommandCategory { + for _, command := range category.Commands { + if !command.Hidden { + return category + } + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []Command { + ret := []Command{} + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + +func (a *App) hasFlag(flag Flag) bool { + for _, f := range a.Flags { + if flag == f { + return true + } + } + + return false +} + +func (a *App) errWriter() io.Writer { + + // When the app ErrWriter is nil use the package level one. 
+ if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + +func (a *App) appendFlag(flag Flag) { + if !a.hasFlag(flag) { + a.Flags = append(a.Flags, flag) + } +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a Author) String() string { + e := "" + if a.Email != "" { + e = " <" + a.Email + ">" + } + + return fmt.Sprintf("%v%v", a.Name, e) +} + +// HandleAction attempts to figure out which Action signature was used. If +// it's an ActionFunc or a func with the legacy signature for Action, the func +// is run! +func HandleAction(action interface{}, context *Context) (err error) { + if a, ok := action.(ActionFunc); ok { + return a(context) + } else if a, ok := action.(func(*Context) error); ok { + return a(context) + } else if a, ok := action.(func(*Context)); ok { // deprecated function signature + a(context) + return nil + } else { + return errInvalidActionType + } +} diff --git a/vendor/github.com/urfave/cli/appveyor.yml b/vendor/github.com/urfave/cli/appveyor.yml new file mode 100644 index 0000000000..1e1489c365 --- /dev/null +++ b/vendor/github.com/urfave/cli/appveyor.yml @@ -0,0 +1,26 @@ +version: "{build}" + +os: Windows Server 2016 + +image: Visual Studio 2017 + +clone_folder: c:\gopath\src\github.com\urfave\cli + +environment: + GOPATH: C:\gopath + GOVERSION: 1.8.x + PYTHON: C:\Python36-x64 + PYTHON_VERSION: 3.6.x + PYTHON_ARCH: 64 + +install: +- set PATH=%GOPATH%\bin;C:\go\bin;%PATH% +- go version +- go env +- go get github.com/urfave/gfmrun/... +- go get -v -t ./... 
+ +build_script: +- python runtests vet +- python runtests test +- python runtests gfmrun diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go new file mode 100644 index 0000000000..1a6055023e --- /dev/null +++ b/vendor/github.com/urfave/cli/category.go @@ -0,0 +1,44 @@ +package cli + +// CommandCategories is a slice of *CommandCategory. +type CommandCategories []*CommandCategory + +// CommandCategory is a category containing commands. +type CommandCategory struct { + Name string + Commands Commands +} + +func (c CommandCategories) Less(i, j int) bool { + return c[i].Name < c[j].Name +} + +func (c CommandCategories) Len() int { + return len(c) +} + +func (c CommandCategories) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// AddCommand adds a command to a category. +func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { + for _, commandCategory := range c { + if commandCategory.Name == category { + commandCategory.Commands = append(commandCategory.Commands, command) + return c + } + } + return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *CommandCategory) VisibleCommands() []Command { + ret := []Command{} + for _, command := range c.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go new file mode 100644 index 0000000000..90c07eb8ef --- /dev/null +++ b/vendor/github.com/urfave/cli/cli.go @@ -0,0 +1,22 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. 
cli is designed to be easy to understand and write, the most simple +// cli application can be written as follows: +// func main() { +// cli.NewApp().Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// func main() { +// app := cli.NewApp() +// app.Name = "greet" +// app.Usage = "say a greeting" +// app.Action = func(c *cli.Context) error { +// println("Greetings") +// return nil +// } +// +// app.Run(os.Args) +// } +package cli + +//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go new file mode 100644 index 0000000000..23de2944be --- /dev/null +++ b/vendor/github.com/urfave/cli/command.go @@ -0,0 +1,304 @@ +package cli + +import ( + "fmt" + "io/ioutil" + "sort" + "strings" +) + +// Command is a subcommand for a cli.App. +type Command struct { + // The name of the command + Name string + // short name of the command. 
Typically one character (deprecated, use `Aliases`) + ShortName string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands Commands + // List of flags to parse + Flags []Flag + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Skip argument reordering which attempts to move flags before arguments, + // but only works if all flags appear after all arguments. This behavior was + // removed n version 2 since it only works under specific conditions so we + // backport here by exposing it as an option for compatibility. + SkipArgReorder bool + // Boolean to hide built-in help command + HideHelp bool + // Boolean to hide this command from help or completion + Hidden bool + + // Full name of command for help, defaults to full command name, including parent commands. 
+ HelpName string + commandNamePath []string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomHelpTemplate string +} + +type CommandsByName []Command + +func (c CommandsByName) Len() int { + return len(c) +} + +func (c CommandsByName) Less(i, j int) bool { + return c[i].Name < c[j].Name +} + +func (c CommandsByName) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// FullName returns the full name of the command. +// For subcommands this ensures that parent commands are part of the command path +func (c Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +// Commands is a slice of Command +type Commands []Command + +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c Command) Run(ctx *Context) (err error) { + if len(c.Subcommands) > 0 { + return c.startApp(ctx) + } + + if !c.HideHelp && (HelpFlag != BoolFlag{}) { + // append help to flags + c.Flags = append( + c.Flags, + HelpFlag, + ) + } + + set, err := flagSet(c.Name, c.Flags) + if err != nil { + return err + } + set.SetOutput(ioutil.Discard) + + if c.SkipFlagParsing { + err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) + } else if !c.SkipArgReorder { + firstFlagIndex := -1 + terminatorIndex := -1 + for index, arg := range ctx.Args() { + if arg == "--" { + terminatorIndex = index + break + } else if arg == "-" { + // Do nothing. A dash alone is not really a flag. 
+ continue + } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { + firstFlagIndex = index + } + } + + if firstFlagIndex > -1 { + args := ctx.Args() + regularArgs := make([]string, len(args[1:firstFlagIndex])) + copy(regularArgs, args[1:firstFlagIndex]) + + var flagArgs []string + if terminatorIndex > -1 { + flagArgs = args[firstFlagIndex:terminatorIndex] + regularArgs = append(regularArgs, args[terminatorIndex:]...) + } else { + flagArgs = args[firstFlagIndex:] + } + + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } + } else { + err = set.Parse(ctx.Args().Tail()) + } + + nerr := normalizeFlags(c.Flags, set) + if nerr != nil { + fmt.Fprintln(ctx.App.Writer, nerr) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return nerr + } + + context := NewContext(ctx.App, set, ctx) + context.Command = c + if checkCommandCompletions(context, c.Name) { + return nil + } + + if err != nil { + if c.OnUsageError != nil { + err := c.OnUsageError(context, err, false) + HandleExitCoder(err) + return err + } + fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) + fmt.Fprintln(context.App.Writer) + ShowCommandHelp(context, c.Name) + return err + } + + if checkCommandHelp(context, c.Name) { + return nil + } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + HandleExitCoder(err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err = c.Before(context) + if err != nil { + ShowCommandHelp(context, c.Name) + HandleExitCoder(err) + return err + } + } + + if c.Action == nil { + c.Action = helpSubcommand.Action + } + + err = HandleAction(c.Action, context) + + if err != nil { + HandleExitCoder(err) + } + return err +} + +// Names returns the names including short names and aliases. 
+func (c Command) Names() []string { + names := []string{c.Name} + + if c.ShortName != "" { + names = append(names, c.ShortName) + } + + return append(names, c.Aliases...) +} + +// HasName returns true if Command.Name or Command.ShortName matches given name +func (c Command) HasName(name string) bool { + for _, n := range c.Names() { + if n == name { + return true + } + } + return false +} + +func (c Command) startApp(ctx *Context) error { + app := NewApp() + app.Metadata = ctx.App.Metadata + // set the name and usage + app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) + if c.HelpName == "" { + app.HelpName = c.HelpName + } else { + app.HelpName = app.Name + } + + app.Usage = c.Usage + app.Description = c.Description + app.ArgsUsage = c.ArgsUsage + + // set CommandNotFound + app.CommandNotFound = ctx.App.CommandNotFound + app.CustomAppHelpTemplate = c.CustomHelpTemplate + + // set the flags and commands + app.Commands = c.Subcommands + app.Flags = c.Flags + app.HideHelp = c.HideHelp + + app.Version = ctx.App.Version + app.HideVersion = ctx.App.HideVersion + app.Compiled = ctx.App.Compiled + app.Author = ctx.App.Author + app.Email = ctx.App.Email + app.Writer = ctx.App.Writer + app.ErrWriter = ctx.App.ErrWriter + + app.categories = CommandCategories{} + for _, command := range c.Subcommands { + app.categories = app.categories.AddCommand(command.Category, command) + } + + sort.Sort(app.categories) + + // bash completion + app.EnableBashCompletion = ctx.App.EnableBashCompletion + if c.BashComplete != nil { + app.BashComplete = c.BashComplete + } + + // set the actions + app.Before = c.Before + app.After = c.After + if c.Action != nil { + app.Action = c.Action + } else { + app.Action = helpSubcommand.Action + } + app.OnUsageError = c.OnUsageError + + for index, cc := range app.Commands { + app.Commands[index].commandNamePath = []string{c.Name, cc.Name} + } + + return app.RunAsSubcommand(ctx) +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func 
(c Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go new file mode 100644 index 0000000000..db94191e2a --- /dev/null +++ b/vendor/github.com/urfave/cli/context.go @@ -0,0 +1,278 @@ +package cli + +import ( + "errors" + "flag" + "reflect" + "strings" + "syscall" +) + +// Context is a type that is passed through to +// each Handler action in a cli application. Context +// can be used to retrieve context-specific Args and +// parsed command-line options. +type Context struct { + App *App + Command Command + shellComplete bool + flagSet *flag.FlagSet + setFlags map[string]bool + parentContext *Context +} + +// NewContext creates a new context. For use in when invoking an App or Command action. +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + c := &Context{App: app, flagSet: set, parentContext: parentCtx} + + if parentCtx != nil { + c.shellComplete = parentCtx.shellComplete + } + + return c +} + +// NumFlags returns the number of flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() +} + +// Set sets a context flag to a value. 
+func (c *Context) Set(name, value string) error { + c.setFlags = nil + return c.flagSet.Set(name, value) +} + +// GlobalSet sets a context flag to a value on the global flagset +func (c *Context) GlobalSet(name, value string) error { + globalContext(c).setFlags = nil + return globalContext(c).flagSet.Set(name, value) +} + +// IsSet determines if the flag was actually set +func (c *Context) IsSet(name string) bool { + if c.setFlags == nil { + c.setFlags = make(map[string]bool) + + c.flagSet.Visit(func(f *flag.Flag) { + c.setFlags[f.Name] = true + }) + + c.flagSet.VisitAll(func(f *flag.Flag) { + if _, ok := c.setFlags[f.Name]; ok { + return + } + c.setFlags[f.Name] = false + }) + + // XXX hack to support IsSet for flags with EnvVar + // + // There isn't an easy way to do this with the current implementation since + // whether a flag was set via an environment variable is very difficult to + // determine here. Instead, we intend to introduce a backwards incompatible + // change in version 2 to add `IsSet` to the Flag interface to push the + // responsibility closer to where the information required to determine + // whether a flag is set by non-standard means such as environment + // variables is avaliable. 
+ // + // See https://github.com/urfave/cli/issues/294 for additional discussion + flags := c.Command.Flags + if c.Command.Name == "" { // cannot == Command{} since it contains slice types + if c.App != nil { + flags = c.App.Flags + } + } + for _, f := range flags { + eachName(f.GetName(), func(name string) { + if isSet, ok := c.setFlags[name]; isSet || !ok { + return + } + + val := reflect.ValueOf(f) + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + + envVarValue := val.FieldByName("EnvVar") + if !envVarValue.IsValid() { + return + } + + eachName(envVarValue.String(), func(envVar string) { + envVar = strings.TrimSpace(envVar) + if _, ok := syscall.Getenv(envVar); ok { + c.setFlags[name] = true + return + } + }) + }) + } + } + + return c.setFlags[name] +} + +// GlobalIsSet determines if the global flag was actually set +func (c *Context) GlobalIsSet(name string) bool { + ctx := c + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + + for ; ctx != nil; ctx = ctx.parentContext { + if ctx.IsSet(name) { + return true + } + } + return false +} + +// FlagNames returns a slice of flag names used in this context. +func (c *Context) FlagNames() (names []string) { + for _, flag := range c.Command.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" { + continue + } + names = append(names, name) + } + return +} + +// GlobalFlagNames returns a slice of global flag names used by the app. 
+func (c *Context) GlobalFlagNames() (names []string) { + for _, flag := range c.App.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" || name == "version" { + continue + } + names = append(names, name) + } + return +} + +// Parent returns the parent context, if any +func (c *Context) Parent() *Context { + return c.parentContext +} + +// value returns the value of the flag coressponding to `name` +func (c *Context) value(name string) interface{} { + return c.flagSet.Lookup(name).Value.(flag.Getter).Get() +} + +// Args contains apps console arguments +type Args []string + +// Args returns the command line arguments associated with the context. +func (c *Context) Args() Args { + args := Args(c.flagSet.Args()) + return args +} + +// NArg returns the number of the command line arguments. +func (c *Context) NArg() int { + return len(c.Args()) +} + +// Get returns the nth argument, or else a blank string +func (a Args) Get(n int) string { + if len(a) > n { + return a[n] + } + return "" +} + +// First returns the first argument, or else a blank string +func (a Args) First() string { + return a.Get(0) +} + +// Tail returns the rest of the arguments (not the first one) +// or else an empty string slice +func (a Args) Tail() []string { + if len(a) >= 2 { + return []string(a)[1:] + } + return []string{} +} + +// Present checks if there are any arguments present +func (a Args) Present() bool { + return len(a) != 0 +} + +// Swap swaps arguments at the given indexes +func (a Args) Swap(from, to int) error { + if from >= len(a) || to >= len(a) { + return errors.New("index out of range") + } + a[from], a[to] = a[to], a[from] + return nil +} + +func globalContext(ctx *Context) *Context { + if ctx == nil { + return nil + } + + for { + if ctx.parentContext == nil { + return ctx + } + ctx = ctx.parentContext + } +} + +func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx 
!= nil; ctx = ctx.parentContext { + if f := ctx.flagSet.Lookup(name); f != nil { + return ctx.flagSet + } + } + return nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case *StringSlice: + default: + set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := strings.Split(f.GetName(), ",") + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/urfave/cli/errors.go new file mode 100644 index 0000000000..562b2953cf --- /dev/null +++ b/vendor/github.com/urfave/cli/errors.go @@ -0,0 +1,115 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError struct { + Errors []error +} + +// NewMultiError creates a new MultiError. Pass in one or more errors. +func NewMultiError(err ...error) MultiError { + return MultiError{Errors: err} +} + +// Error implements the error interface. 
+func (m MultiError) Error() string { + errs := make([]string, len(m.Errors)) + for i, err := range m.Errors { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +// ExitError fulfills both the builtin `error` interface and `ExitCoder` +type ExitError struct { + exitCode int + message interface{} +} + +// NewExitError makes a new *ExitError +func NewExitError(message interface{}, exitCode int) *ExitError { + return &ExitError{ + exitCode: exitCode, + message: message, + } +} + +// Error returns the string message, fulfilling the interface required by +// `error` +func (ee *ExitError) Error() string { + return fmt.Sprintf("%v", ee.message) +} + +// ExitCode returns the exit code, fulfilling the interface required by +// `ExitCoder` +func (ee *ExitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if +// so prints the error to stderr (if it is non-empty) and calls OsExiter with the +// given exit code. If the given error is a MultiError, then this func is +// called on all members of the Errors slice and calls OsExiter with the last exit code. 
+func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + code := handleMultiError(multiErr) + OsExiter(code) + return + } +} + +func handleMultiError(multiErr MultiError) int { + code := 1 + for _, merr := range multiErr.Errors { + if multiErr2, ok := merr.(MultiError); ok { + code = handleMultiError(multiErr2) + } else { + fmt.Fprintln(ErrWriter, merr) + if exitErr, ok := merr.(ExitCoder); ok { + code = exitErr.ExitCode() + } + } + } + return code +} diff --git a/vendor/github.com/urfave/cli/flag-types.json b/vendor/github.com/urfave/cli/flag-types.json new file mode 100644 index 0000000000..1223107859 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag-types.json @@ -0,0 +1,93 @@ +[ + { + "name": "Bool", + "type": "bool", + "value": false, + "context_default": "false", + "parser": "strconv.ParseBool(f.Value.String())" + }, + { + "name": "BoolT", + "type": "bool", + "value": false, + "doctail": " that is true by default", + "context_default": "false", + "parser": "strconv.ParseBool(f.Value.String())" + }, + { + "name": "Duration", + "type": "time.Duration", + "doctail": " (see https://golang.org/pkg/time/#ParseDuration)", + "context_default": "0", + "parser": "time.ParseDuration(f.Value.String())" + }, + { + "name": "Float64", + "type": "float64", + "context_default": "0", + "parser": "strconv.ParseFloat(f.Value.String(), 64)" + }, + { + "name": "Generic", + "type": "Generic", + "dest": false, + "context_default": "nil", + "context_type": "interface{}" + }, + { + "name": "Int64", + "type": "int64", + "context_default": "0", + "parser": "strconv.ParseInt(f.Value.String(), 0, 64)" + }, + { + "name": "Int", + "type": "int", + "context_default": "0", + "parser": 
"strconv.ParseInt(f.Value.String(), 0, 64)", + "parser_cast": "int(parsed)" + }, + { + "name": "IntSlice", + "type": "*IntSlice", + "dest": false, + "context_default": "nil", + "context_type": "[]int", + "parser": "(f.Value.(*IntSlice)).Value(), error(nil)" + }, + { + "name": "Int64Slice", + "type": "*Int64Slice", + "dest": false, + "context_default": "nil", + "context_type": "[]int64", + "parser": "(f.Value.(*Int64Slice)).Value(), error(nil)" + }, + { + "name": "String", + "type": "string", + "context_default": "\"\"", + "parser": "f.Value.String(), error(nil)" + }, + { + "name": "StringSlice", + "type": "*StringSlice", + "dest": false, + "context_default": "nil", + "context_type": "[]string", + "parser": "(f.Value.(*StringSlice)).Value(), error(nil)" + }, + { + "name": "Uint64", + "type": "uint64", + "context_default": "0", + "parser": "strconv.ParseUint(f.Value.String(), 0, 64)" + }, + { + "name": "Uint", + "type": "uint", + "context_default": "0", + "parser": "strconv.ParseUint(f.Value.String(), 0, 64)", + "parser_cast": "uint(parsed)" + } +] diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go new file mode 100644 index 0000000000..877ff3523d --- /dev/null +++ b/vendor/github.com/urfave/cli/flag.go @@ -0,0 +1,799 @@ +package cli + +import ( + "flag" + "fmt" + "reflect" + "runtime" + "strconv" + "strings" + "syscall" + "time" +) + +const defaultPlaceholder = "value" + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag Flag = BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag Flag = BoolFlag{ + Name: "version, v", + Usage: "print the version", +} + +// HelpFlag prints the help for all commands and subcommands +// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand +// unless HideHelp is set to true) +var HelpFlag Flag = BoolFlag{ + Name: "help, h", + Usage: "show help", 
+} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// FlagsByName is a slice of Flag. +type FlagsByName []Flag + +func (f FlagsByName) Len() int { + return len(f) +} + +func (f FlagsByName) Less(i, j int) bool { + return f[i].GetName() < f[j].GetName() +} + +func (f FlagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// Flag is a common interface related to parsing flags in cli. +// For more advanced flag parsing techniques, it is recommended that +// this interface be implemented. +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) + GetName() string +} + +// errorableFlag is an interface that allows us to return errors during apply +// it allows flags defined in this library to return errors in a fashion backwards compatible +// TODO remove in v2 and modify the existing Flag interface to return errors +type errorableFlag interface { + Flag + + ApplyWithError(*flag.FlagSet) error +} + +func flagSet(name string, flags []Flag) (*flag.FlagSet, error) { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + //TODO remove in v2 when errorableFlag is removed + if ef, ok := f.(errorableFlag); ok { + if err := ef.ApplyWithError(set); err != nil { + return nil, err + } + } else { + f.Apply(set) + } + } + return set, nil +} + +func eachName(longName string, fn func(string)) { + parts := strings.Split(longName, ",") + for _, name := range parts { + name = strings.Trim(name, " ") + fn(name) + } +} + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +// Ignores parsing errors +func (f GenericFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// 
ApplyWithError takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error { + val := f.Value + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if err := val.Set(envVal); err != nil { + return fmt.Errorf("could not parse %s as value for flag %s: %s", envVal, f.Name, err) + } + break + } + } + } + + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter +type StringSlice []string + +// Set appends the string value to the list of values +func (f *StringSlice) Set(value string) error { + *f = append(*f, value) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *StringSlice) String() string { + return fmt.Sprintf("%s", *f) +} + +// Value returns the slice of strings set by this flag +func (f *StringSlice) Value() []string { + return *f +} + +// Get returns the slice of strings set by this flag +func (f *StringSlice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringSliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &StringSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err) + } + } + 
f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &StringSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter +type IntSlice []int + +// Set parses the value into an integer and appends it to the list of values +func (f *IntSlice) Set(value string) error { + tmp, err := strconv.Atoi(value) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *IntSlice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *IntSlice) Value() []int { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *IntSlice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntSliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &IntSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &IntSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// Int64Slice is an opaque type for []int to satisfy flag.Value and flag.Getter +type Int64Slice []int64 + +// Set parses the value into an integer and appends it to the list of values +func (f 
*Int64Slice) Set(value string) error { + tmp, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Int64Slice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *Int64Slice) Value() []int64 { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *Int64Slice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64SliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &Int64Slice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + }) + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error { + val := false + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if envVal == "" { + val = false + break + } + + envValBool, err := 
strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + + val = envValBool + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolTFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error { + val := true + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if envVal == "" { + val = false + break + } + + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + + val = envValBool + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + f.Value = envVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + return + } + set.String(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the 
flag given the flag set and environment +// Ignores errors +func (f IntFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + f.Value = int(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Int(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValInt + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Int64(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f UintFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f UintFlag) ApplyWithError(set *flag.FlagSet) error { + if 
f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Uint64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint64(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint64(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f DurationFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValDuration, err := time.ParseDuration(envVal) + if err != nil { + return 
fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValDuration + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Duration(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Float64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValFloat, err := strconv.ParseFloat(envVal, 10) + if err != nil { + return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = float64(envValFloat) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Float64(name, f.Value, f.Usage) + }) + + return nil +} + +func visibleFlags(fl []Flag) []Flag { + visible := []Flag{} + for _, flag := range fl { + field := flagValue(flag).FieldByName("Hidden") + if !field.IsValid() || !field.Bool() { + visible = append(visible, flag) + } + } + return visible +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +// Returns the placeholder, if any, and the unquoted usage string. 
+func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(fullName, placeholder string) string { + var prefixed string + parts := strings.Split(fullName, ",") + for i, name := range parts { + name = strings.Trim(name, " ") + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(parts)-1 { + prefixed += ", " + } + } + return prefixed +} + +func withEnvHint(envVar, str string) string { + envText := "" + if envVar != "" { + prefix := "$" + suffix := "" + sep := ", $" + if runtime.GOOS == "windows" { + prefix = "%" + suffix = "%" + sep = "%, %" + } + envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix) + } + return str + envText +} + +func flagValue(f Flag) reflect.Value { + fv := reflect.ValueOf(f) + for fv.Kind() == reflect.Ptr { + fv = reflect.Indirect(fv) + } + return fv +} + +func stringifyFlag(f Flag) string { + fv := flagValue(f) + + switch f.(type) { + case IntSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyIntSliceFlag(f.(IntSliceFlag))) + case Int64SliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyInt64SliceFlag(f.(Int64SliceFlag))) + case StringSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyStringSliceFlag(f.(StringSliceFlag))) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + + if val := fv.FieldByName("Value"); val.IsValid() { + needsPlaceholder = true + defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(" 
(default: %q)", val.String()) + } + } + + if defaultValueString == " (default: )" { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) + + return withEnvHint(fv.FieldByName("EnvVar").String(), + fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f IntSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyInt64SliceFlag(f Int64SliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyStringSliceFlag(f StringSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifySliceFlag(usage, name string, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault) +} diff --git a/vendor/github.com/urfave/cli/flag_generated.go b/vendor/github.com/urfave/cli/flag_generated.go new file mode 
100644 index 0000000000..491b61956c --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_generated.go @@ -0,0 +1,627 @@ +package cli + +import ( + "flag" + "strconv" + "time" +) + +// WARNING: This file is generated! + +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolFlag) GetName() string { + return f.Name +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (c *Context) Bool(name string) bool { + return lookupBool(name, c.flagSet) +} + +// GlobalBool looks up the value of a global BoolFlag, returns +// false if not found +func (c *Context) GlobalBool(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} + +// BoolTFlag is a flag with type bool that is true by default +type BoolTFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolTFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolTFlag) GetName() string { + return f.Name +} + +// BoolT looks up the value of a local BoolTFlag, returns +// false if not found +func (c *Context) BoolT(name string) bool { + return lookupBoolT(name, c.flagSet) +} + +// GlobalBoolT looks up the value of a global BoolTFlag, returns +// false if not found +func (c *Context) GlobalBoolT(name string) bool { + if fs := 
lookupGlobalFlagSet(name, c); fs != nil { + return lookupBoolT(name, fs) + } + return false +} + +func lookupBoolT(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} + +// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) +type DurationFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value time.Duration + Destination *time.Duration +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f DurationFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f DurationFlag) GetName() string { + return f.Name +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (c *Context) Duration(name string) time.Duration { + return lookupDuration(name, c.flagSet) +} + +// GlobalDuration looks up the value of a global DurationFlag, returns +// 0 if not found +func (c *Context) GlobalDuration(name string) time.Duration { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value float64 + Destination *float64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Float64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Float64Flag) GetName() string { + return f.Name +} + +// Float64 looks up the value of a local Float64Flag, returns 
+// 0 if not found +func (c *Context) Float64(name string) float64 { + return lookupFloat64(name, c.flagSet) +} + +// GlobalFloat64 looks up the value of a global Float64Flag, returns +// 0 if not found +func (c *Context) GlobalFloat64(name string) float64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value Generic +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f GenericFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f GenericFlag) GetName() string { + return f.Name +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (c *Context) Generic(name string) interface{} { + return lookupGeneric(name, c.flagSet) +} + +// GlobalGeneric looks up the value of a global GenericFlag, returns +// nil if not found +func (c *Context) GlobalGeneric(name string) interface{} { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value, error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value int64 + Destination *int64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the 
name of the flag +func (f Int64Flag) GetName() string { + return f.Name +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (c *Context) Int64(name string) int64 { + return lookupInt64(name, c.flagSet) +} + +// GlobalInt64 looks up the value of a global Int64Flag, returns +// 0 if not found +func (c *Context) GlobalInt64(name string) int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value int + Destination *int +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntFlag) GetName() string { + return f.Name +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (c *Context) Int(name string) int { + return lookupInt(name, c.flagSet) +} + +// GlobalInt looks up the value of a global IntFlag, returns +// 0 if not found +func (c *Context) GlobalInt(name string) int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *IntSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntSliceFlag) 
String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntSliceFlag) GetName() string { + return f.Name +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (c *Context) IntSlice(name string) []int { + return lookupIntSlice(name, c.flagSet) +} + +// GlobalIntSlice looks up the value of a global IntSliceFlag, returns +// nil if not found +func (c *Context) GlobalIntSlice(name string) []int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*IntSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *Int64Slice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64SliceFlag) GetName() string { + return f.Name +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (c *Context) Int64Slice(name string) []int64 { + return lookupInt64Slice(name, c.flagSet) +} + +// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns +// nil if not found +func (c *Context) GlobalInt64Slice(name string) []int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64Slice(name, fs) + } + return nil +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// StringFlag is a flag with type string +type 
StringFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value string + Destination *string +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringFlag) GetName() string { + return f.Name +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (c *Context) String(name string) string { + return lookupString(name, c.flagSet) +} + +// GlobalString looks up the value of a global StringFlag, returns +// "" if not found +func (c *Context) GlobalString(name string) string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *StringSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringSliceFlag) GetName() string { + return f.Name +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (c *Context) StringSlice(name string) []string { + return lookupStringSlice(name, c.flagSet) +} + +// GlobalStringSlice looks up the value of a global StringSliceFlag, returns +// nil if not found +func (c *Context) GlobalStringSlice(name string) []string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := 
set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*StringSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value uint64 + Destination *uint64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Uint64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Uint64Flag) GetName() string { + return f.Name +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (c *Context) Uint64(name string) uint64 { + return lookupUint64(name, c.flagSet) +} + +// GlobalUint64 looks up the value of a global Uint64Flag, returns +// 0 if not found +func (c *Context) GlobalUint64(name string) uint64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value uint + Destination *uint +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f UintFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f UintFlag) GetName() string { + return f.Name +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (c *Context) Uint(name string) uint { + return lookupUint(name, c.flagSet) +} + +// GlobalUint looks up the value of a global UintFlag, returns +// 0 if not found +func (c *Context) GlobalUint(name string) uint { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + 
return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go new file mode 100644 index 0000000000..cba5e6cb0c --- /dev/null +++ b/vendor/github.com/urfave/cli/funcs.go @@ -0,0 +1,28 @@ +package cli + +// BashCompleteFunc is an action to execute when the bash-completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. 
+type FlagStringFunc func(Flag) string diff --git a/vendor/github.com/urfave/cli/generate-flag-types b/vendor/github.com/urfave/cli/generate-flag-types new file mode 100644 index 0000000000..7147381ce3 --- /dev/null +++ b/vendor/github.com/urfave/cli/generate-flag-types @@ -0,0 +1,255 @@ +#!/usr/bin/env python +""" +The flag types that ship with the cli library have many things in common, and +so we can take advantage of the `go generate` command to create much of the +source code from a list of definitions. These definitions attempt to cover +the parts that vary between flag types, and should evolve as needed. + +An example of the minimum definition needed is: + + { + "name": "SomeType", + "type": "sometype", + "context_default": "nil" + } + +In this example, the code generated for the `cli` package will include a type +named `SomeTypeFlag` that is expected to wrap a value of type `sometype`. +Fetching values by name via `*cli.Context` will default to a value of `nil`. + +A more complete, albeit somewhat redundant, example showing all available +definition keys is: + + { + "name": "VeryMuchType", + "type": "*VeryMuchType", + "value": true, + "dest": false, + "doctail": " which really only wraps a []float64, oh well!", + "context_type": "[]float64", + "context_default": "nil", + "parser": "parseVeryMuchType(f.Value.String())", + "parser_cast": "[]float64(parsed)" + } + +The meaning of each field is as follows: + + name (string) - The type "name", which will be suffixed with + `Flag` when generating the type definition + for `cli` and the wrapper type for `altsrc` + type (string) - The type that the generated `Flag` type for `cli` + is expected to "contain" as its `.Value` member + value (bool) - Should the generated `cli` type have a `Value` + member? + dest (bool) - Should the generated `cli` type support a + destination pointer? 
+ doctail (string) - Additional docs for the `cli` flag type comment + context_type (string) - The literal type used in the `*cli.Context` + reader func signature + context_default (string) - The literal value used as the default by the + `*cli.Context` reader funcs when no value is + present + parser (string) - Literal code used to parse the flag `f`, + expected to have a return signature of + (value, error) + parser_cast (string) - Literal code used to cast the `parsed` value + returned from the `parser` code +""" + +from __future__ import print_function, unicode_literals + +import argparse +import json +import os +import subprocess +import sys +import tempfile +import textwrap + + +class _FancyFormatter(argparse.ArgumentDefaultsHelpFormatter, + argparse.RawDescriptionHelpFormatter): + pass + + +def main(sysargs=sys.argv[:]): + parser = argparse.ArgumentParser( + description='Generate flag type code!', + formatter_class=_FancyFormatter) + parser.add_argument( + 'package', + type=str, default='cli', choices=_WRITEFUNCS.keys(), + help='Package for which flag types will be generated' + ) + parser.add_argument( + '-i', '--in-json', + type=argparse.FileType('r'), + default=sys.stdin, + help='Input JSON file which defines each type to be generated' + ) + parser.add_argument( + '-o', '--out-go', + type=argparse.FileType('w'), + default=sys.stdout, + help='Output file/stream to which generated source will be written' + ) + parser.epilog = __doc__ + + args = parser.parse_args(sysargs[1:]) + _generate_flag_types(_WRITEFUNCS[args.package], args.out_go, args.in_json) + return 0 + + +def _generate_flag_types(writefunc, output_go, input_json): + types = json.load(input_json) + + tmp = tempfile.NamedTemporaryFile(suffix='.go', delete=False) + writefunc(tmp, types) + tmp.close() + + new_content = subprocess.check_output( + ['goimports', tmp.name] + ).decode('utf-8') + + print(new_content, file=output_go, end='') + output_go.flush() + os.remove(tmp.name) + + +def 
_set_typedef_defaults(typedef): + typedef.setdefault('doctail', '') + typedef.setdefault('context_type', typedef['type']) + typedef.setdefault('dest', True) + typedef.setdefault('value', True) + typedef.setdefault('parser', 'f.Value, error(nil)') + typedef.setdefault('parser_cast', 'parsed') + + +def _write_cli_flag_types(outfile, types): + _fwrite(outfile, """\ + package cli + + // WARNING: This file is generated! + + """) + + for typedef in types: + _set_typedef_defaults(typedef) + + _fwrite(outfile, """\ + // {name}Flag is a flag with type {type}{doctail} + type {name}Flag struct {{ + Name string + Usage string + EnvVar string + Hidden bool + """.format(**typedef)) + + if typedef['value']: + _fwrite(outfile, """\ + Value {type} + """.format(**typedef)) + + if typedef['dest']: + _fwrite(outfile, """\ + Destination *{type} + """.format(**typedef)) + + _fwrite(outfile, "\n}\n\n") + + _fwrite(outfile, """\ + // String returns a readable representation of this value + // (for usage defaults) + func (f {name}Flag) String() string {{ + return FlagStringer(f) + }} + + // GetName returns the name of the flag + func (f {name}Flag) GetName() string {{ + return f.Name + }} + + // {name} looks up the value of a local {name}Flag, returns + // {context_default} if not found + func (c *Context) {name}(name string) {context_type} {{ + return lookup{name}(name, c.flagSet) + }} + + // Global{name} looks up the value of a global {name}Flag, returns + // {context_default} if not found + func (c *Context) Global{name}(name string) {context_type} {{ + if fs := lookupGlobalFlagSet(name, c); fs != nil {{ + return lookup{name}(name, fs) + }} + return {context_default} + }} + + func lookup{name}(name string, set *flag.FlagSet) {context_type} {{ + f := set.Lookup(name) + if f != nil {{ + parsed, err := {parser} + if err != nil {{ + return {context_default} + }} + return {parser_cast} + }} + return {context_default} + }} + """.format(**typedef)) + + +def _write_altsrc_flag_types(outfile, 
types): + _fwrite(outfile, """\ + package altsrc + + import ( + "gopkg.in/urfave/cli.v1" + ) + + // WARNING: This file is generated! + + """) + + for typedef in types: + _set_typedef_defaults(typedef) + + _fwrite(outfile, """\ + // {name}Flag is the flag type that wraps cli.{name}Flag to allow + // for other values to be specified + type {name}Flag struct {{ + cli.{name}Flag + set *flag.FlagSet + }} + + // New{name}Flag creates a new {name}Flag + func New{name}Flag(fl cli.{name}Flag) *{name}Flag {{ + return &{name}Flag{{{name}Flag: fl, set: nil}} + }} + + // Apply saves the flagSet for later usage calls, then calls the + // wrapped {name}Flag.Apply + func (f *{name}Flag) Apply(set *flag.FlagSet) {{ + f.set = set + f.{name}Flag.Apply(set) + }} + + // ApplyWithError saves the flagSet for later usage calls, then calls the + // wrapped {name}Flag.ApplyWithError + func (f *{name}Flag) ApplyWithError(set *flag.FlagSet) error {{ + f.set = set + return f.{name}Flag.ApplyWithError(set) + }} + """.format(**typedef)) + + +def _fwrite(outfile, text): + print(textwrap.dedent(text), end='', file=outfile) + + +_WRITEFUNCS = { + 'cli': _write_cli_flag_types, + 'altsrc': _write_altsrc_flag_types +} + +if __name__ == '__main__': + sys.exit(main()) diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go new file mode 100644 index 0000000000..57ec98d58a --- /dev/null +++ b/vendor/github.com/urfave/cli/help.go @@ -0,0 +1,338 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" +) + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
+var AppHelpTemplate = `NAME: + {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if len .Authors}} + +AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +GLOBAL OPTIONS: + {{range $index, $option := .VisibleFlags}}{{if $index}} + {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{.Copyright}}{{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var CommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
+var SubcommandHelpTemplate = `NAME: + {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} +{{end}}{{if .VisibleFlags}} +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +var helpCommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + ShowAppHelp(c) + return nil + }, +} + +var helpSubcommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + return ShowSubcommandHelp(c) + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +// Prints help for the App or Command with custom template function. +type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) + +// HelpPrinter is a function that writes the help output. If not set a default +// is used. The function signature is: +// func(w io.Writer, templ string, data interface{}) +var HelpPrinter helpPrinter = printHelp + +// HelpPrinterCustom is same as HelpPrinter but +// takes a custom function for template function map. 
+var HelpPrinterCustom helpPrinterCustom = printHelpCustom + +// VersionPrinter prints the version for the App +var VersionPrinter = printVersion + +// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code. +func ShowAppHelpAndExit(c *Context, exitCode int) { + ShowAppHelp(c) + os.Exit(exitCode) +} + +// ShowAppHelp is an action that displays the help. +func ShowAppHelp(c *Context) (err error) { + if c.App.CustomAppHelpTemplate == "" { + HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) + return + } + customAppData := func() map[string]interface{} { + if c.App.ExtraInfo == nil { + return nil + } + return map[string]interface{}{ + "ExtraInfo": c.App.ExtraInfo, + } + } + HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData()) + return nil +} + +// DefaultAppComplete prints the list of subcommands as the default app completion method +func DefaultAppComplete(c *Context) { + for _, command := range c.App.Commands { + if command.Hidden { + continue + } + for _, name := range command.Names() { + fmt.Fprintln(c.App.Writer, name) + } + } +} + +// ShowCommandHelpAndExit - exits with code after showing help +func ShowCommandHelpAndExit(c *Context, command string, code int) { + ShowCommandHelp(c, command) + os.Exit(code) +} + +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { + // show the subcommand help for a command with subcommands + if command == "" { + HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) + return nil + } + + for _, c := range ctx.App.Commands { + if c.HasName(command) { + if c.CustomHelpTemplate != "" { + HelpPrinterCustom(ctx.App.Writer, c.CustomHelpTemplate, c, nil) + } else { + HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) + } + return nil + } + } + + if ctx.App.CommandNotFound == nil { + return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) + } + + ctx.App.CommandNotFound(ctx, command) + return 
nil +} + +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + return ShowCommandHelp(c, c.Command.Name) +} + +// ShowVersion prints the version number of the App +func ShowVersion(c *Context) { + VersionPrinter(c) +} + +func printVersion(c *Context) { + fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) +} + +// ShowCompletions prints the lists of commands within a given context +func ShowCompletions(c *Context) { + a := c.App + if a != nil && a.BashComplete != nil { + a.BashComplete(c) + } +} + +// ShowCommandCompletions prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.App.Command(command) + if c != nil && c.BashComplete != nil { + c.BashComplete(ctx) + } +} + +func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) { + funcMap := template.FuncMap{ + "join": strings.Join, + } + if customFunc != nil { + for key, value := range customFunc { + funcMap[key] = value + } + } + + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + err := t.Execute(w, data) + if err != nil { + // If the writer is closed, t.Execute will fail, and there's nothing + // we can do to recover. 
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return + } + w.Flush() +} + +func printHelp(out io.Writer, templ string, data interface{}) { + printHelpCustom(out, templ, data, nil) +} + +func checkVersion(c *Context) bool { + found := false + if VersionFlag.GetName() != "" { + eachName(VersionFlag.GetName(), func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkHelp(c *Context) bool { + found := false + if HelpFlag.GetName() != "" { + eachName(HelpFlag.GetName(), func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkCommandHelp(c *Context, name string) bool { + if c.Bool("h") || c.Bool("help") { + ShowCommandHelp(c, name) + return true + } + + return false +} + +func checkSubcommandHelp(c *Context) bool { + if c.Bool("h") || c.Bool("help") { + ShowSubcommandHelp(c) + return true + } + + return false +} + +func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { + if !a.EnableBashCompletion { + return false, arguments + } + + pos := len(arguments) - 1 + lastArg := arguments[pos] + + if lastArg != "--"+BashCompletionFlag.GetName() { + return false, arguments + } + + return true, arguments[:pos] +} + +func checkCompletions(c *Context) bool { + if !c.shellComplete { + return false + } + + if args := c.Args(); args.Present() { + name := args.First() + if cmd := c.App.Command(name); cmd != nil { + // let the command handle the completion + return false + } + } + + ShowCompletions(c) + return true +} + +func checkCommandCompletions(c *Context, name string) bool { + if !c.shellComplete { + return false + } + + ShowCommandCompletions(c, name) + return true +} diff --git a/vendor/github.com/urfave/cli/runtests b/vendor/github.com/urfave/cli/runtests new file mode 100644 index 0000000000..ee22bdeed5 --- /dev/null +++ b/vendor/github.com/urfave/cli/runtests @@ 
-0,0 +1,122 @@ +#!/usr/bin/env python +from __future__ import print_function + +import argparse +import os +import sys +import tempfile + +from subprocess import check_call, check_output + + +PACKAGE_NAME = os.environ.get( + 'CLI_PACKAGE_NAME', 'github.com/urfave/cli' +) + + +def main(sysargs=sys.argv[:]): + targets = { + 'vet': _vet, + 'test': _test, + 'gfmrun': _gfmrun, + 'toc': _toc, + 'gen': _gen, + } + + parser = argparse.ArgumentParser() + parser.add_argument( + 'target', nargs='?', choices=tuple(targets.keys()), default='test' + ) + args = parser.parse_args(sysargs[1:]) + + targets[args.target]() + return 0 + + +def _test(): + if check_output('go version'.split()).split()[2] < 'go1.2': + _run('go test -v .') + return + + coverprofiles = [] + for subpackage in ['', 'altsrc']: + coverprofile = 'cli.coverprofile' + if subpackage != '': + coverprofile = '{}.coverprofile'.format(subpackage) + + coverprofiles.append(coverprofile) + + _run('go test -v'.split() + [ + '-coverprofile={}'.format(coverprofile), + ('{}/{}'.format(PACKAGE_NAME, subpackage)).rstrip('/') + ]) + + combined_name = _combine_coverprofiles(coverprofiles) + _run('go tool cover -func={}'.format(combined_name)) + os.remove(combined_name) + + +def _gfmrun(): + go_version = check_output('go version'.split()).split()[2] + if go_version < 'go1.3': + print('runtests: skip on {}'.format(go_version), file=sys.stderr) + return + _run(['gfmrun', '-c', str(_gfmrun_count()), '-s', 'README.md']) + + +def _vet(): + _run('go vet ./...') + + +def _toc(): + _run('node_modules/.bin/markdown-toc -i README.md') + _run('git diff --exit-code') + + +def _gen(): + go_version = check_output('go version'.split()).split()[2] + if go_version < 'go1.5': + print('runtests: skip on {}'.format(go_version), file=sys.stderr) + return + + _run('go generate ./...') + _run('git diff --exit-code') + + +def _run(command): + if hasattr(command, 'split'): + command = command.split() + print('runtests: {}'.format(' '.join(command)), 
file=sys.stderr) + check_call(command) + + +def _gfmrun_count(): + with open('README.md') as infile: + lines = infile.read().splitlines() + return len(filter(_is_go_runnable, lines)) + + +def _is_go_runnable(line): + return line.startswith('package main') + + +def _combine_coverprofiles(coverprofiles): + combined = tempfile.NamedTemporaryFile( + suffix='.coverprofile', delete=False + ) + combined.write('mode: set\n') + + for coverprofile in coverprofiles: + with open(coverprofile, 'r') as infile: + for line in infile.readlines(): + if not line.startswith('mode: '): + combined.write(line) + + combined.flush() + name = combined.name + combined.close() + return name + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/vendor/github.com/vbatts/go-mtree/.gitignore b/vendor/github.com/vbatts/go-mtree/.gitignore new file mode 100644 index 0000000000..1ddf2a6104 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/.gitignore @@ -0,0 +1,6 @@ +*~ +.cli.test +.lint +.test +.vet +gomtree diff --git a/vendor/github.com/vbatts/go-mtree/.travis.yml b/vendor/github.com/vbatts/go-mtree/.travis.yml new file mode 100644 index 0000000000..39020cc9e5 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/.travis.yml @@ -0,0 +1,20 @@ +language: go +go: + - 1.x + - 1.10.x + - 1.9.x + - 1.8.x + +sudo: false + +before_install: + - git config --global url."https://".insteadOf git:// + - make install.tools + - mkdir -p $GOPATH/src/github.com/vbatts && ln -sf $(pwd) $GOPATH/src/github.com/vbatts/go-mtree + +install: true + +script: + - make validation + - make validation.tags + - make build.arches diff --git a/vendor/github.com/vbatts/go-mtree/LICENSE b/vendor/github.com/vbatts/go-mtree/LICENSE new file mode 100644 index 0000000000..857957aa16 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2016 Vincent Batts, Raleigh, NC, USA + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vbatts/go-mtree/Makefile b/vendor/github.com/vbatts/go-mtree/Makefile new file mode 100644 index 0000000000..a4ec422374 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/Makefile @@ -0,0 +1,85 @@ + +BUILD := gomtree +BUILDPATH := github.com/vbatts/go-mtree/cmd/gomtree +CWD := $(shell pwd) +SOURCE_FILES := $(shell find . 
-type f -name "*.go") +CLEAN_FILES := *~ +TAGS := +ARCHES := linux,386 linux,amd64 linux,arm linux,arm64 openbsd,amd64 windows,amd64 darwin,amd64 + +default: build validation + +.PHONY: validation +validation: .test .lint .vet .cli.test + +.PHONY: validation.tags +validation.tags: .test.tags .vet.tags .cli.test + +.PHONY: test +test: .test + +CLEAN_FILES += .test .test.tags + +.test: $(SOURCE_FILES) + go test -v $$(glide novendor) && touch $@ + +.test.tags: $(SOURCE_FILES) + set -e ; for tag in $(TAGS) ; do go test -tags $$tag -v $$(glide novendor) ; done && touch $@ + +.PHONY: lint +lint: .lint + +CLEAN_FILES += .lint + +.lint: $(SOURCE_FILES) + set -e ; for dir in $$(glide novendor) ; do golint -set_exit_status $$dir ; done && touch $@ + +.PHONY: vet +vet: .vet .vet.tags + +CLEAN_FILES += .vet .vet.tags + +.vet: $(SOURCE_FILES) + go vet $$(glide novendor) && touch $@ + +.vet.tags: $(SOURCE_FILES) + set -e ; for tag in $(TAGS) ; do go vet -tags $$tag -v $$(glide novendor) ; done && touch $@ + +.PHONY: cli.test +cli.test: .cli.test + +CLEAN_FILES += .cli.test .cli.test.tags + +.cli.test: $(BUILD) $(wildcard ./test/cli/*.sh) + @go run ./test/cli.go ./test/cli/*.sh && touch $@ + +.cli.test.tags: $(BUILD) $(wildcard ./test/cli/*.sh) + @set -e ; for tag in $(TAGS) ; do go run -tags $$tag ./test/cli.go ./test/cli/*.sh ; done && touch $@ + +.PHONY: build +build: $(BUILD) + +$(BUILD): $(SOURCE_FILES) + go build -o $(BUILD) $(BUILDPATH) + +install.tools: + go get -u -v github.com/Masterminds/glide + go get -u -v golang.org/x/lint/golint + +./bin: + mkdir -p $@ + +CLEAN_FILES += bin + +build.arches: ./bin + @set -e ;\ + for pair in $(ARCHES); do \ + p=$$(echo $$pair | cut -d , -f 1);\ + a=$$(echo $$pair | cut -d , -f 2);\ + echo "Building $$p/$$a ...";\ + GOOS=$$p GOARCH=$$a go build -o ./bin/gomtree.$$p.$$a $(BUILDPATH) ;\ + done + +clean: + rm -rf $(BUILD) $(CLEAN_FILES) + diff --git a/vendor/github.com/vbatts/go-mtree/README.md 
b/vendor/github.com/vbatts/go-mtree/README.md new file mode 100644 index 0000000000..b285a6f063 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/README.md @@ -0,0 +1,213 @@ +# go-mtree + +[![Go Report Card](https://goreportcard.com/badge/github.com/vbatts/go-mtree)](https://goreportcard.com/report/github.com/vbatts/go-mtree) + +`mtree` is a filesystem hierarchy validation tooling and format. +This is a library and simple cli tool for [mtree(8)][mtree(8)] support. + +While the traditional `mtree` cli utility is primarily on BSDs (FreeBSD, +openBSD, etc), even broader support for the `mtree` specification format is +provided with libarchive ([libarchive-formats(5)][libarchive-formats(5)]). + +There is also an [mtree port for Linux][archiecobbs/mtree-port] though it is +not widely packaged for Linux distributions. + + +## Format + +The format of hierarchy specification is consistent with the `# mtree v2.0` +format. Both the BSD `mtree` and libarchive ought to be interoperable with it +with only one definite caveat. On Linux, extended attributes (`xattr`) on +files are often a critical aspect of the file, holding ACLs, capabilities, etc. +While FreeBSD filesystem do support `extattr`, this feature has not made its +way into their `mtree`. + +This implementation of mtree supports a few non-upstream "keyword"s, such as: +`xattr` and `tar_time`. If you include these keywords, the FreeBSD `mtree` +will fail, as they are unknown keywords to that implementation. + +To have `go-mtree` produce specifications that will be +strictly compatible with the BSD `mtree`, use the `-bsd-keywords` flag when +creating a manifest. This will make sure that only the keywords supported by +BSD `mtree` are used in the program. + + +### Typical form + +With the standard keywords, plus say `sha256digest`, the hierarchy +specification looks like: + +```mtree +# . +/set type=file nlink=1 mode=0664 uid=1000 gid=100 +. 
size=4096 type=dir mode=0755 nlink=6 time=1459370393.273231538 + LICENSE size=1502 mode=0644 time=1458851690.0 sha256digest=ef4e53d83096be56dc38dbf9bc8ba9e3068bec1ec37c179033d1e8f99a1c2a95 + README.md size=2820 mode=0644 time=1459370256.316148361 sha256digest=d9b955134d99f84b17c0a711ce507515cc93cd7080a9dcd50400e3d993d876ac + +[...] +``` + +See the directory presently in, and the files present. Along with each +path, is provided the keywords and the unique values for each path. Any common +keyword and values are established in the `/set` command. + + +### Extended attributes form + +```mtree +# . +/set type=file nlink=1 mode=0664 uid=1000 gid=1000 +. size=4096 type=dir mode=0775 nlink=6 time=1459370191.11179595 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA== + LICENSE size=1502 time=1458851690.583562292 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA== + README.md size=2366 mode=0644 time=1459369604.0 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA== + +[...] +``` + +See the keyword prefixed with `xattr.` followed by the extended attribute's +namespace and keyword. This setup is consistent for use with Linux extended +attributes as well as FreeBSD extended attributes. + +Since extended attributes are an unordered hashmap, this approach allows for +checking each `.` individually. + +The value is the [base64 encoded][base64] of the value of the particular +extended attribute. Since the values themselves could be raw bytes, this +approach avoids issues with encoding. + +### Tar form + +```mtree +# . +/set type=file mode=0664 uid=1000 gid=1000 +. type=dir mode=0775 tar_time=1468430408.000000000 + +# samedir +samedir type=dir mode=0775 tar_time=1468000972.000000000 + file2 size=0 tar_time=1467999782.000000000 + file1 size=0 tar_time=1467999781.000000000 + +[...] 
+``` + +While `go-mtree` serves mainly as a library for upstream `mtree` support, +`go-mtree` is also compatible with [tar archives][tar] (which is not an upstream feature). +This means that we can now create and validate a manifest by specifying a tar file. +More interestingly, this also means that we can create a manifest from an archive, and then +validate this manifest against a filesystem hierarchy that's on disk, and vice versa. + +Notice that for the output of creating a validation manifest from a tar file, the default behavior +for evaluating a notion of time is to use the `tar_time` keyword. In the +"filesystem hierarchy" format of mtree, `time` is being evaluated with +nanosecond precision. However, GNU tar truncates a file's modification time +to 1-second precision. That is, if a file's full modification time is +123456789.123456789, the "tar time" equivalent would be 123456789.000000000. +This way, if you validate a manifest created using a tar file against an +actual root directory, there will be no complaints from `go-mtree` so long as the +1-second precision time of a file in the root directory is the same. + + +## Usage + +To use the Go programming language library, see [the docs][godoc]. + +To use the command line tool, first [build it](#Building), then the following. + + +### Create a manifest + +This will also include the sha512 digest of the files. + +```bash +gomtree -c -K sha512digest -p . > /tmp/root.mtree +``` + +With a tar file: + +```bash +gomtree -c -K sha512digest -T sometarfile.tar > /tmp/tar.mtree +``` + +### Validate a manifest + +```bash +gomtree -p . 
-f /tmp/root.mtree +``` + +With a tar file: + +```bash +gomtree -T sometarfile.tar -f /tmp/root.mtree +``` + +### See the supported keywords + +```bash +gomtree -list-keywords +Available keywords: + uname + sha1 + sha1digest + sha256digest + xattrs (not upstream) + link (default) + nlink (default) + md5digest + rmd160digest + mode (default) + cksum + md5 + rmd160 + type (default) + time (default) + uid (default) + gid (default) + sha256 + sha384 + sha512 + xattr (not upstream) + tar_time (not upstream) + size (default) + ripemd160digest + sha384digest + sha512digest +``` + + +## Building + +Either: + +```bash +go get github.com/vbatts/go-mtree/cmd/gomtree +``` + +or + +```bash +git clone git://github.com/vbatts/go-mtree.git $GOPATH/src/github.com/vbatts/go-mtree +cd $GOPATH/src/github.com/vbatts/go-mtree +go build ./cmd/gomtree +``` + +## Testing + +On Linux: +```bash +cd $GOPATH/src/github.com/vbatts/go-mtree +make +``` + +On FreeBSD: +```bash +cd $GOPATH/src/github.com/vbatts/go-mtree +gmake +``` + + +[mtree(8)]: https://www.freebsd.org/cgi/man.cgi?mtree(8) +[libarchive-formats(5)]: https://www.freebsd.org/cgi/man.cgi?query=libarchive-formats&sektion=5&n=1 +[archiecobbs/mtree-port]: https://github.com/archiecobbs/mtree-port +[godoc]: https://godoc.org/github.com/vbatts/go-mtree +[tar]: http://man7.org/linux/man-pages/man1/tar.1.html +[base64]: https://tools.ietf.org/html/rfc4648 diff --git a/vendor/github.com/vbatts/go-mtree/check.go b/vendor/github.com/vbatts/go-mtree/check.go new file mode 100644 index 0000000000..29e05e3144 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/check.go @@ -0,0 +1,30 @@ +package mtree + +// Check a root directory path against the DirectoryHierarchy, regarding only +// the available keywords from the list and each entry in the hierarchy. 
+// If keywords is nil, the check all present in the DirectoryHierarchy +// +// This is equivalent to creating a new DirectoryHierarchy with Walk(root, nil, +// keywords, fs) and then doing a Compare(dh, newDh, keywords). +func Check(root string, dh *DirectoryHierarchy, keywords []Keyword, fs FsEval) ([]InodeDelta, error) { + if keywords == nil { + keywords = dh.UsedKeywords() + } + + newDh, err := Walk(root, nil, keywords, fs) + if err != nil { + return nil, err + } + + return Compare(dh, newDh, keywords) +} + +// TarCheck is the tar equivalent of checking a file hierarchy spec against a +// tar stream to determine if files have been changed. This is precisely +// equivalent to Compare(dh, tarDH, keywords). +func TarCheck(tarDH, dh *DirectoryHierarchy, keywords []Keyword) ([]InodeDelta, error) { + if keywords == nil { + return Compare(dh, tarDH, dh.UsedKeywords()) + } + return Compare(dh, tarDH, keywords) +} diff --git a/vendor/github.com/vbatts/go-mtree/cksum.go b/vendor/github.com/vbatts/go-mtree/cksum.go new file mode 100644 index 0000000000..2247cac9e8 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/cksum.go @@ -0,0 +1,49 @@ +package mtree + +import ( + "bufio" + "io" +) + +const posixPolynomial uint32 = 0x04C11DB7 + +// cksum is an implementation of the POSIX CRC algorithm +func cksum(r io.Reader) (uint32, int, error) { + in := bufio.NewReader(r) + count := 0 + var sum uint32 + f := func(b byte) { + for i := 7; i >= 0; i-- { + msb := sum & (1 << 31) + sum = sum << 1 + if msb != 0 { + sum = sum ^ posixPolynomial + } + } + sum ^= uint32(b) + } + + for done := false; !done; { + switch b, err := in.ReadByte(); err { + case io.EOF: + done = true + case nil: + f(b) + count++ + default: + return ^sum, count, err + } + } + for m := count; ; { + f(byte(m) & 0xff) + m = m >> 8 + if m == 0 { + break + } + } + f(0) + f(0) + f(0) + f(0) + return ^sum, count, nil +} diff --git a/vendor/github.com/vbatts/go-mtree/compare.go 
b/vendor/github.com/vbatts/go-mtree/compare.go new file mode 100644 index 0000000000..458df857ee --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/compare.go @@ -0,0 +1,448 @@ +package mtree + +import ( + "encoding/json" + "fmt" + "strconv" +) + +// XXX: Do we need a Difference interface to make it so people can do var x +// Difference = ? The main problem is that keys and inodes need to +// have different interfaces, so it's just a pain. + +// DifferenceType represents the type of a discrepancy encountered for +// an object. This is also used to represent discrepancies between keys +// for objects. +type DifferenceType string + +const ( + // Missing represents a discrepancy where the object is present in + // the @old manifest but is not present in the @new manifest. + Missing DifferenceType = "missing" + + // Extra represents a discrepancy where the object is not present in + // the @old manifest but is present in the @new manifest. + Extra DifferenceType = "extra" + + // Modified represents a discrepancy where the object is present in + // both the @old and @new manifests, but one or more of the keys + // have different values (or have not been set in one of the + // manifests). + Modified DifferenceType = "modified" + + // ErrorDifference represents an attempted update to the values of + // a keyword that failed + ErrorDifference DifferenceType = "errored" +) + +// These functions return *type from the parameter. It's just shorthand, to +// ensure that we don't accidentally expose pointers to the caller that are +// internal data. +func ePtr(e Entry) *Entry { return &e } +func sPtr(s string) *string { return &s } + +// InodeDelta Represents a discrepancy in a filesystem object between two +// DirectoryHierarchy manifests. Discrepancies are caused by entries only +// present in one manifest [Missing, Extra], keys only present in one of the +// manifests [Modified] or a difference between the keys of the same object in +// both manifests [Modified]. 
+type InodeDelta struct { + diff DifferenceType + path string + new Entry + old Entry + keys []KeyDelta +} + +// Type returns the type of discrepancy encountered when comparing this inode +// between the two DirectoryHierarchy manifests. +func (i InodeDelta) Type() DifferenceType { + return i.diff +} + +// Path returns the path to the inode (relative to the root of the +// DirectoryHierarchy manifests). +func (i InodeDelta) Path() string { + return i.path +} + +// Diff returns the set of key discrepancies between the two manifests for the +// specific inode. If the DifferenceType of the inode is not Modified, then +// Diff returns nil. +func (i InodeDelta) Diff() []KeyDelta { + return i.keys +} + +// Old returns the value of the inode Entry in the "old" DirectoryHierarchy (as +// determined by the ordering of parameters to Compare). +func (i InodeDelta) Old() *Entry { + if i.diff == Modified || i.diff == Missing { + return ePtr(i.old) + } + return nil +} + +// New returns the value of the inode Entry in the "new" DirectoryHierarchy (as +// determined by the ordering of parameters to Compare). +func (i InodeDelta) New() *Entry { + if i.diff == Modified || i.diff == Extra { + return ePtr(i.new) + } + return nil +} + +// MarshalJSON creates a JSON-encoded version of InodeDelta. +func (i InodeDelta) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type DifferenceType `json:"type"` + Path string `json:"path"` + Keys []KeyDelta `json:"keys"` + }{ + Type: i.diff, + Path: i.path, + Keys: i.keys, + }) +} + +// String returns a "pretty" formatting for InodeDelta. +func (i InodeDelta) String() string { + switch i.diff { + case Modified: + // Output the first failure. 
+ f := i.keys[0] + return fmt.Sprintf("%q: keyword %q: expected %s; got %s", i.path, f.name, f.old, f.new) + case Extra: + return fmt.Sprintf("%q: unexpected path", i.path) + case Missing: + return fmt.Sprintf("%q: missing path", i.path) + default: + panic("programming error") + } +} + +// KeyDelta Represents a discrepancy in a key for a particular filesystem +// object between two DirectoryHierarchy manifests. Discrepancies are caused by +// keys only present in one manifest [Missing, Extra] or a difference between +// the keys of the same object in both manifests [Modified]. A set of these is +// returned with InodeDelta.Diff(). +type KeyDelta struct { + diff DifferenceType + name Keyword + old string + new string + err error // used for update delta results +} + +// Type returns the type of discrepancy encountered when comparing this key +// between the two DirectoryHierarchy manifests' relevant inode entry. +func (k KeyDelta) Type() DifferenceType { + return k.diff +} + +// Name returns the name (the key) of the KeyDeltaVal entry in the +// DirectoryHierarchy. +func (k KeyDelta) Name() Keyword { + return k.name +} + +// Old returns the value of the KeyDeltaVal entry in the "old" DirectoryHierarchy +// (as determined by the ordering of parameters to Compare). Returns nil if +// there was no entry in the "old" DirectoryHierarchy. +func (k KeyDelta) Old() *string { + if k.diff == Modified || k.diff == Missing { + return sPtr(k.old) + } + return nil +} + +// New returns the value of the KeyDeltaVal entry in the "new" DirectoryHierarchy +// (as determined by the ordering of parameters to Compare). Returns nil if +// there was no entry in the "old" DirectoryHierarchy. +func (k KeyDelta) New() *string { + if k.diff == Modified || k.diff == Extra { + return sPtr(k.old) + } + return nil +} + +// MarshalJSON creates a JSON-encoded version of KeyDelta. 
+func (k KeyDelta) MarshalJSON() ([]byte, error) {
+	return json.Marshal(struct {
+		Type DifferenceType `json:"type"`
+		Name Keyword        `json:"name"`
+		Old  string         `json:"old"`
+		New  string         `json:"new"`
+	}{
+		Type: k.diff,
+		Name: k.name,
+		Old:  k.old,
+		New:  k.new,
+	})
+}
+
+// Like Compare, but for single inode entries only. Used to compute the
+// cached version of inode.keys. Returns one KeyDelta per keyword that is
+// missing from, extra in, or modified between the two entries.
+func compareEntry(oldEntry, newEntry Entry) ([]KeyDelta, error) {
+	// Represents the new and old states for an entry's keys.
+	type stateT struct {
+		Old *KeyVal
+		New *KeyVal
+	}
+
+	// Keyword-keyed map so key ordering inside the entries does not matter.
+	diffs := map[Keyword]*stateT{}
+	oldKeys := oldEntry.AllKeys()
+	newKeys := newEntry.AllKeys()
+
+	// Fill the map with the old keys first.
+	for _, kv := range oldKeys {
+		key := kv.Keyword()
+		// only add this diff if the new keys has this keyword
+		// (time/tar_time and xattr.* keys are always recorded, since they get
+		// special handling below and must be reported even when one-sided)
+		if key != "tar_time" && key != "time" && key.Prefix() != "xattr" && len(HasKeyword(newKeys, key)) == 0 {
+			continue
+		}
+
+		// Cannot take &kv because it's the iterator.
+		copy := new(KeyVal)
+		*copy = kv
+
+		_, ok := diffs[key]
+		if !ok {
+			diffs[key] = new(stateT)
+		}
+		diffs[key].Old = copy
+	}
+
+	// Then fill the new keys.
+	for _, kv := range newKeys {
+		key := kv.Keyword()
+		// only add this diff if the old keys has this keyword
+		if key != "tar_time" && key != "time" && key.Prefix() != "xattr" && len(HasKeyword(oldKeys, key)) == 0 {
+			continue
+		}
+
+		// Cannot take &kv because it's the iterator.
+		copy := new(KeyVal)
+		*copy = kv
+
+		_, ok := diffs[key]
+		if !ok {
+			diffs[key] = new(stateT)
+		}
+		diffs[key].New = copy
+	}
+
+	// We need a full list of the keys so we can deal with different keyvalue
+	// orderings.
+	var kws []Keyword
+	for kw := range diffs {
+		kws = append(kws, kw)
+	}
+
+	// If both tar_time and time were specified in the set of keys, we have to
+	// mess with the diffs. This is an unfortunate side-effect of tar archives.
+	// The "time" entry is converted into a synthetic second-granularity
+	// "tar_time" entry on whichever side only had "time", so that the two
+	// sides become comparable.
+	// TODO(cyphar): This really should be abstracted inside keywords.go
+	if InKeywordSlice("tar_time", kws) && InKeywordSlice("time", kws) {
+		// Delete "time".
+		timeStateT := diffs["time"]
+		delete(diffs, "time")
+
+		// Make a new tar_time.
+		if diffs["tar_time"].Old == nil {
+			// NOTE(review): assumes the manifest that lacks tar_time.Old is
+			// the one carrying time.Old; timeStateT.Old would be nil
+			// otherwise — confirm callers guarantee this.
+			time, err := strconv.ParseFloat(timeStateT.Old.Value(), 64)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse old time: %s", err)
+			}
+
+			newTime := new(KeyVal)
+			*newTime = KeyVal(fmt.Sprintf("tar_time=%d.000000000", int64(time)))
+
+			diffs["tar_time"].Old = newTime
+		} else if diffs["tar_time"].New == nil {
+			time, err := strconv.ParseFloat(timeStateT.New.Value(), 64)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse new time: %s", err)
+			}
+
+			newTime := new(KeyVal)
+			*newTime = KeyVal(fmt.Sprintf("tar_time=%d.000000000", int64(time)))
+
+			diffs["tar_time"].New = newTime
+		} else {
+			// Both sides already have tar_time as well as time — ambiguous.
+			return nil, fmt.Errorf("time and tar_time set in the same manifest")
+		}
+	}
+
+	// Are there any differences?
+	var results []KeyDelta
+	for name, diff := range diffs {
+		// Invalid
+		if diff.Old == nil && diff.New == nil {
+			return nil, fmt.Errorf("invalid state: both old and new are nil: key=%s", name)
+		}
+
+		switch {
+		// Missing
+		case diff.New == nil:
+			results = append(results, KeyDelta{
+				diff: Missing,
+				name: name,
+				old:  diff.Old.Value(),
+			})
+
+		// Extra
+		case diff.Old == nil:
+			results = append(results, KeyDelta{
+				diff: Extra,
+				name: name,
+				new:  diff.New.Value(),
+			})
+
+		// Modified
+		default:
+			if !diff.Old.Equal(*diff.New) {
+				results = append(results, KeyDelta{
+					diff: Modified,
+					name: name,
+					old:  diff.Old.Value(),
+					new:  diff.New.Value(),
+				})
+			}
+		}
+	}
+
+	return results, nil
+}
+
+// Compare compares two directory hierarchy manifests, and returns the
+// list of discrepancies between the two. All of the entries in the
+// manifest are considered, with differences being generated for
+// RelativeType and FullType entries.
Differences in structure (such as +// the way /set and /unset are written) are not considered to be +// discrepancies. The list of differences are all filesystem objects. +// +// keys controls which keys will be compared, but if keys is nil then all +// possible keys will be compared between the two manifests (allowing for +// missing entries and the like). A missing or extra key is treated as a +// Modified type. +// +// If oldDh or newDh are empty, we assume they are a hierarchy that is +// completely empty. This is purely for helping callers create synthetic +// InodeDeltas. +// +// NB: The order of the parameters matters (old, new) because Extra and +// Missing are considered as different discrepancy types. +func Compare(oldDh, newDh *DirectoryHierarchy, keys []Keyword) ([]InodeDelta, error) { + // Represents the new and old states for an entry. + type stateT struct { + Old *Entry + New *Entry + } + + // To deal with different orderings of the entries, use a path-keyed + // map to make sure we don't start comparing unrelated entries. + diffs := map[string]*stateT{} + + // First, iterate over the old hierarchy. If nil, pretend it's empty. + if oldDh != nil { + for _, e := range oldDh.Entries { + if e.Type == RelativeType || e.Type == FullType { + path, err := e.Path() + if err != nil { + return nil, err + } + //fmt.Printf("new: %q\n", path) + + // Cannot take &kv because it's the iterator. + cEntry := new(Entry) + *cEntry = e + + _, ok := diffs[path] + if !ok { + diffs[path] = &stateT{} + } + diffs[path].Old = cEntry + } + } + } + + // Then, iterate over the new hierarchy. If nil, pretend it's empty. + if newDh != nil { + for _, e := range newDh.Entries { + if e.Type == RelativeType || e.Type == FullType { + path, err := e.Path() + if err != nil { + return nil, err + } + //fmt.Printf("old: %q\n", path) + + // Cannot take &kv because it's the iterator. 
+ cEntry := new(Entry) + *cEntry = e + + _, ok := diffs[path] + if !ok { + diffs[path] = &stateT{} + } + diffs[path].New = cEntry + } + } + } + + // Now we compute the diff. + var results []InodeDelta + for path, diff := range diffs { + // Invalid + if diff.Old == nil && diff.New == nil { + return nil, fmt.Errorf("invalid state: both old and new are nil: path=%s", path) + } + + switch { + // Missing + case diff.New == nil: + results = append(results, InodeDelta{ + diff: Missing, + path: path, + old: *diff.Old, + }) + + // Extra + case diff.Old == nil: + results = append(results, InodeDelta{ + diff: Extra, + path: path, + new: *diff.New, + }) + + // Modified + default: + changed, err := compareEntry(*diff.Old, *diff.New) + if err != nil { + return nil, fmt.Errorf("comparison failed %s: %s", path, err) + } + + // Now remove "changed" entries that don't match the keys. + if keys != nil { + var filterChanged []KeyDelta + for _, keyDiff := range changed { + if InKeywordSlice(keyDiff.name.Prefix(), keys) { + filterChanged = append(filterChanged, keyDiff) + } + } + changed = filterChanged + } + + // Check if there were any actual changes. 
+ if len(changed) > 0 { + results = append(results, InodeDelta{ + diff: Modified, + path: path, + old: *diff.Old, + new: *diff.New, + keys: changed, + }) + } + } + } + + return results, nil +} diff --git a/vendor/github.com/vbatts/go-mtree/creator.go b/vendor/github.com/vbatts/go-mtree/creator.go new file mode 100644 index 0000000000..43149c1302 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/creator.go @@ -0,0 +1,10 @@ +package mtree + +// dhCreator is used in when building a DirectoryHierarchy +type dhCreator struct { + DH *DirectoryHierarchy + fs FsEval + curSet *Entry + curDir *Entry + curEnt *Entry +} diff --git a/vendor/github.com/vbatts/go-mtree/entry.go b/vendor/github.com/vbatts/go-mtree/entry.go new file mode 100644 index 0000000000..fc8c1c9d8d --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/entry.go @@ -0,0 +1,152 @@ +package mtree + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/vbatts/go-mtree/pkg/govis" +) + +type byPos []Entry + +func (bp byPos) Len() int { return len(bp) } +func (bp byPos) Less(i, j int) bool { return bp[i].Pos < bp[j].Pos } +func (bp byPos) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] } + +// Entry is each component of content in the mtree spec file +type Entry struct { + Parent *Entry // up + Children []*Entry // down + Prev, Next *Entry // left, right + Set *Entry // current `/set` for additional keywords + Pos int // order in the spec + Raw string // file or directory name + Name string // file or directory name + Keywords []KeyVal // TODO(vbatts) maybe a keyword typed set of values? + Type EntryType +} + +// Descend searches thru an Entry's children to find the Entry associated with +// `filename`. Directories are stored at the end of an Entry's children so do a +// traverse backwards. If you descend to a "." +func (e Entry) Descend(filename string) *Entry { + if filename == "." 
|| filename == "" { + return &e + } + numChildren := len(e.Children) + for i := range e.Children { + c := e.Children[numChildren-1-i] + if c.Name == filename { + return c + } + } + return nil +} + +// Find is a wrapper around Descend that takes in a whole string path and tries +// to find that Entry +func (e Entry) Find(filepath string) *Entry { + resultnode := &e + for _, path := range strings.Split(filepath, "/") { + encoded, err := govis.Vis(path, DefaultVisFlags) + if err != nil { + return nil + } + resultnode = resultnode.Descend(encoded) + if resultnode == nil { + return nil + } + } + return resultnode +} + +// Ascend gets the parent of an Entry. Serves mainly to maintain readability +// when traversing up and down an Entry tree +func (e Entry) Ascend() *Entry { + return e.Parent +} + +// Path provides the full path of the file, despite RelativeType or FullType. It +// will be in Unvis'd form. +func (e Entry) Path() (string, error) { + decodedName, err := govis.Unvis(e.Name, DefaultVisFlags) + if err != nil { + return "", err + } + if e.Parent == nil || e.Type == FullType { + return filepath.Clean(decodedName), nil + } + parentName, err := e.Parent.Path() + if err != nil { + return "", err + } + return filepath.Clean(filepath.Join(parentName, decodedName)), nil +} + +// String joins a file with its associated keywords. The file name will be the +// Vis'd encoded version so that it can be parsed appropriately when Check'd. 
+func (e Entry) String() string { + if e.Raw != "" { + return e.Raw + } + if e.Type == BlankType { + return "" + } + if e.Type == DotDotType { + return e.Name + } + if e.Type == SpecialType || e.Type == FullType || inKeyValSlice("type=dir", e.Keywords) { + return fmt.Sprintf("%s %s", e.Name, strings.Join(KeyValToString(e.Keywords), " ")) + } + return fmt.Sprintf(" %s %s", e.Name, strings.Join(KeyValToString(e.Keywords), " ")) +} + +// AllKeys returns the full set of KeyVal for the given entry, based on the +// /set keys as well as the entry-local keys. Entry-local keys always take +// precedence. +func (e Entry) AllKeys() []KeyVal { + if e.Set != nil { + return MergeKeyValSet(e.Set.Keywords, e.Keywords) + } + return e.Keywords +} + +// IsDir checks the type= value for this entry on whether it is a directory +func (e Entry) IsDir() bool { + for _, kv := range e.AllKeys() { + if kv.Keyword().Prefix() == "type" { + return kv.Value() == "dir" + } + } + return false +} + +// EntryType are the formats of lines in an mtree spec file +type EntryType int + +// The types of lines to be found in an mtree spec file +const ( + SignatureType EntryType = iota // first line of the file, like `#mtree v2.0` + BlankType // blank lines are ignored + CommentType // Lines beginning with `#` are ignored + SpecialType // line that has `/` prefix issue a "special" command (currently only /set and /unset) + RelativeType // if the first white-space delimited word does not have a '/' in it. Options/keywords are applied. + DotDotType // .. - A relative path step. 
keywords/options are ignored + FullType // if the first word on the line has a `/` after the first character, it interpretted as a file pathname with options +) + +// String returns the name of the EntryType +func (et EntryType) String() string { + return typeNames[et] +} + +var typeNames = map[EntryType]string{ + SignatureType: "SignatureType", + BlankType: "BlankType", + CommentType: "CommentType", + SpecialType: "SpecialType", + RelativeType: "RelativeType", + DotDotType: "DotDotType", + FullType: "FullType", +} diff --git a/vendor/github.com/vbatts/go-mtree/fseval.go b/vendor/github.com/vbatts/go-mtree/fseval.go new file mode 100644 index 0000000000..2f006c537a --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/fseval.go @@ -0,0 +1,54 @@ +package mtree + +import "os" + +// FsEval is a mock-friendly method of specifying to go-mtree how to carry out +// filesystem operations such as opening files and the like. The semantics of +// all of these wrappers MUST be identical to the semantics described here. +type FsEval interface { + // Open must have the same semantics as os.Open. + Open(path string) (*os.File, error) + + // Lstat must have the same semantics as os.Lstat. + Lstat(path string) (os.FileInfo, error) + + // Readdir must have the same semantics as calling os.Open on the given + // path and then returning the result of (*os.File).Readdir(-1). + Readdir(path string) ([]os.FileInfo, error) + + // KeywordFunc must return a wrapper around the provided function (in other + // words, the returned function must refer to the same keyword). + KeywordFunc(fn KeywordFunc) KeywordFunc +} + +// DefaultFsEval is the default implementation of FsEval (and is the default +// used if a nil interface is passed to any mtree function). It does not modify +// or wrap any of the methods (they all just call out to os.*). +type DefaultFsEval struct{} + +// Open must have the same semantics as os.Open. 
+func (fs DefaultFsEval) Open(path string) (*os.File, error) { + return os.Open(path) +} + +// Lstat must have the same semantics as os.Lstat. +func (fs DefaultFsEval) Lstat(path string) (os.FileInfo, error) { + return os.Lstat(path) +} + +// Readdir must have the same semantics as calling os.Open on the given +// path and then returning the result of (*os.File).Readdir(-1). +func (fs DefaultFsEval) Readdir(path string) ([]os.FileInfo, error) { + fh, err := os.Open(path) + if err != nil { + return nil, err + } + defer fh.Close() + return fh.Readdir(-1) +} + +// KeywordFunc must return a wrapper around the provided function (in other +// words, the returned function must refer to the same keyword). +func (fs DefaultFsEval) KeywordFunc(fn KeywordFunc) KeywordFunc { + return fn +} diff --git a/vendor/github.com/vbatts/go-mtree/glide.lock b/vendor/github.com/vbatts/go-mtree/glide.lock new file mode 100644 index 0000000000..4d197f78d3 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/glide.lock @@ -0,0 +1,21 @@ +hash: 8b0df7f603e6b580aa2640d99d3fa7430198f7db89321ff2abf76efa969d14c2 +updated: 2018-08-20T07:56:40.333174254-04:00 +imports: +- name: github.com/fatih/color + version: 5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4 +- name: github.com/sirupsen/logrus + version: 3e01752db0189b9157070a0e1668a620f9a85da2 +- name: golang.org/x/crypto + version: 1351f936d976c60a0a48d728281922cf63eafb8d + subpackages: + - ripemd160 + - ssh/terminal +- name: golang.org/x/sys + version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 + subpackages: + - unix +testImports: +- name: github.com/davecgh/go-spew + version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 + subpackages: + - spew diff --git a/vendor/github.com/vbatts/go-mtree/glide.yaml b/vendor/github.com/vbatts/go-mtree/glide.yaml new file mode 100644 index 0000000000..3e78783459 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/glide.yaml @@ -0,0 +1,16 @@ +package: github.com/vbatts/go-mtree +description: File systems verification 
utility and library, in likeness of mtree(8) +homepage: https://github.com/vbatts/go-mtree +license: BSD-3-Clause +import: +- package: golang.org/x/crypto + subpackages: + - ripemd160 +- package: github.com/sirupsen/logrus + version: ^1.0.0 +- package: golang.org/x/sys + version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 + subpackages: + - unix +- package: github.com/fatih/color + version: ^1.6.0 diff --git a/vendor/github.com/vbatts/go-mtree/hierarchy.go b/vendor/github.com/vbatts/go-mtree/hierarchy.go new file mode 100644 index 0000000000..0c3b8953c0 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/hierarchy.go @@ -0,0 +1,48 @@ +package mtree + +import ( + "io" + "sort" +) + +// DirectoryHierarchy is the mapped structure for an mtree directory hierarchy +// spec +type DirectoryHierarchy struct { + Entries []Entry +} + +// WriteTo simplifies the output of the resulting hierarchy spec +func (dh DirectoryHierarchy) WriteTo(w io.Writer) (n int64, err error) { + sort.Sort(byPos(dh.Entries)) + var sum int64 + for _, e := range dh.Entries { + str := e.String() + i, err := io.WriteString(w, str+"\n") + if err != nil { + return sum, err + } + sum += int64(i) + } + return sum, nil +} + +// UsedKeywords collects and returns all the keywords used in a +// a DirectoryHierarchy +func (dh DirectoryHierarchy) UsedKeywords() []Keyword { + usedkeywords := []Keyword{} + for _, e := range dh.Entries { + switch e.Type { + case FullType, RelativeType, SpecialType: + if e.Type != SpecialType || e.Name == "/set" { + kvs := e.Keywords + for _, kv := range kvs { + kw := KeyVal(kv).Keyword().Prefix() + if !InKeywordSlice(kw, usedkeywords) { + usedkeywords = append(usedkeywords, KeywordSynonym(string(kw))) + } + } + } + } + } + return usedkeywords +} diff --git a/vendor/github.com/vbatts/go-mtree/keywordfunc.go b/vendor/github.com/vbatts/go-mtree/keywordfunc.go new file mode 100644 index 0000000000..7a8a170038 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/keywordfunc.go @@ -0,0 
+1,172 @@ +package mtree + +import ( + "archive/tar" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + "io" + "os" + + "github.com/vbatts/go-mtree/pkg/govis" + "golang.org/x/crypto/ripemd160" +) + +// KeywordFunc is the type of a function called on each file to be included in +// a DirectoryHierarchy, that will produce the string output of the keyword to +// be included for the file entry. Otherwise, empty string. +// io.Reader `r` is to the file stream for the file payload. While this +// function takes an io.Reader, the caller needs to reset it to the beginning +// for each new KeywordFunc +type KeywordFunc func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) + +var ( + // KeywordFuncs is the map of all keywords (and the functions to produce them) + KeywordFuncs = map[Keyword]KeywordFunc{ + "size": sizeKeywordFunc, // The size, in bytes, of the file + "type": typeKeywordFunc, // The type of the file + "time": timeKeywordFunc, // The last modification time of the file + "link": linkKeywordFunc, // The target of the symbolic link when type=link + "uid": uidKeywordFunc, // The file owner as a numeric value + "gid": gidKeywordFunc, // The file group as a numeric value + "nlink": nlinkKeywordFunc, // The number of hard links the file is expected to have + "uname": unameKeywordFunc, // The file owner as a symbolic name + "gname": gnameKeywordFunc, // The file group as a symbolic name + "mode": modeKeywordFunc, // The current file's permissions as a numeric (octal) or symbolic value + "cksum": cksumKeywordFunc, // The checksum of the file using the default algorithm specified by the cksum(1) utility + "md5": hasherKeywordFunc("md5digest", md5.New), // The MD5 message digest of the file + "md5digest": hasherKeywordFunc("md5digest", md5.New), // A synonym for `md5` + "rmd160": hasherKeywordFunc("ripemd160digest", ripemd160.New), // The RIPEMD160 message digest of the file + "rmd160digest": 
hasherKeywordFunc("ripemd160digest", ripemd160.New), // A synonym for `rmd160` + "ripemd160digest": hasherKeywordFunc("ripemd160digest", ripemd160.New), // A synonym for `rmd160` + "sha1": hasherKeywordFunc("sha1digest", sha1.New), // The SHA1 message digest of the file + "sha1digest": hasherKeywordFunc("sha1digest", sha1.New), // A synonym for `sha1` + "sha256": hasherKeywordFunc("sha256digest", sha256.New), // The SHA256 message digest of the file + "sha256digest": hasherKeywordFunc("sha256digest", sha256.New), // A synonym for `sha256` + "sha384": hasherKeywordFunc("sha384digest", sha512.New384), // The SHA384 message digest of the file + "sha384digest": hasherKeywordFunc("sha384digest", sha512.New384), // A synonym for `sha384` + "sha512": hasherKeywordFunc("sha512digest", sha512.New), // The SHA512 message digest of the file + "sha512digest": hasherKeywordFunc("sha512digest", sha512.New), // A synonym for `sha512` + "sha512256": hasherKeywordFunc("sha512digest", sha512.New512_256), // The SHA512/256 message digest of the file + "sha512256digest": hasherKeywordFunc("sha512digest", sha512.New512_256), // A synonym for `sha512256` + + "flags": flagsKeywordFunc, // NOTE: this is a noop, but here to support the presence of the "flags" keyword. + + // This is not an upstreamed keyword, but used to vary from "time", as tar + // archives do not store nanosecond precision. So comparing on "time" will + // be only seconds level accurate. + "tar_time": tartimeKeywordFunc, // The last modification time of the file, from a tar archive mtime + + // This is not an upstreamed keyword, but a needed attribute for file validation. + // The pattern for this keyword key is prefixed by "xattr." followed by the extended attribute "namespace.key". + // The keyword value is the SHA1 digest of the extended attribute's value. + // In this way, the order of the keys does not matter, and the contents of the value is not revealed. 
+ "xattr": xattrKeywordFunc, + "xattrs": xattrKeywordFunc, + } +) +var ( + modeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + permissions := info.Mode().Perm() + if os.ModeSetuid&info.Mode() > 0 { + permissions |= (1 << 11) + } + if os.ModeSetgid&info.Mode() > 0 { + permissions |= (1 << 10) + } + if os.ModeSticky&info.Mode() > 0 { + permissions |= (1 << 9) + } + return []KeyVal{KeyVal(fmt.Sprintf("mode=%#o", permissions))}, nil + } + sizeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if sys, ok := info.Sys().(*tar.Header); ok { + if sys.Typeflag == tar.TypeSymlink { + return []KeyVal{KeyVal(fmt.Sprintf("size=%d", len(sys.Linkname)))}, nil + } + } + return []KeyVal{KeyVal(fmt.Sprintf("size=%d", info.Size()))}, nil + } + cksumKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if !info.Mode().IsRegular() { + return nil, nil + } + sum, _, err := cksum(r) + if err != nil { + return nil, err + } + return []KeyVal{KeyVal(fmt.Sprintf("cksum=%d", sum))}, nil + } + hasherKeywordFunc = func(name string, newHash func() hash.Hash) KeywordFunc { + return func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if !info.Mode().IsRegular() { + return nil, nil + } + h := newHash() + if _, err := io.Copy(h, r); err != nil { + return nil, err + } + return []KeyVal{KeyVal(fmt.Sprintf("%s=%x", KeywordSynonym(name), h.Sum(nil)))}, nil + } + } + tartimeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + return []KeyVal{KeyVal(fmt.Sprintf("tar_time=%d.%9.9d", info.ModTime().Unix(), 0))}, nil + } + timeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + tSec := info.ModTime().Unix() + tNano := info.ModTime().Nanosecond() + return []KeyVal{KeyVal(fmt.Sprintf("time=%d.%9.9d", tSec, tNano))}, nil + } + linkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if sys, ok := 
info.Sys().(*tar.Header); ok { + if sys.Linkname != "" { + linkname, err := govis.Vis(sys.Linkname, DefaultVisFlags) + if err != nil { + return nil, nil + } + return []KeyVal{KeyVal(fmt.Sprintf("link=%s", linkname))}, nil + } + return nil, nil + } + + if info.Mode()&os.ModeSymlink != 0 { + str, err := os.Readlink(path) + if err != nil { + return nil, nil + } + linkname, err := govis.Vis(str, DefaultVisFlags) + if err != nil { + return nil, nil + } + return []KeyVal{KeyVal(fmt.Sprintf("link=%s", linkname))}, nil + } + return nil, nil + } + typeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if info.Mode().IsDir() { + return []KeyVal{"type=dir"}, nil + } + if info.Mode().IsRegular() { + return []KeyVal{"type=file"}, nil + } + if info.Mode()&os.ModeSocket != 0 { + return []KeyVal{"type=socket"}, nil + } + if info.Mode()&os.ModeSymlink != 0 { + return []KeyVal{"type=link"}, nil + } + if info.Mode()&os.ModeNamedPipe != 0 { + return []KeyVal{"type=fifo"}, nil + } + if info.Mode()&os.ModeDevice != 0 { + if info.Mode()&os.ModeCharDevice != 0 { + return []KeyVal{"type=char"}, nil + } + return []KeyVal{"type=block"}, nil + } + return nil, nil + } +) diff --git a/vendor/github.com/vbatts/go-mtree/keywordfuncs_bsd.go b/vendor/github.com/vbatts/go-mtree/keywordfuncs_bsd.go new file mode 100644 index 0000000000..6114109321 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/keywordfuncs_bsd.go @@ -0,0 +1,69 @@ +// +build darwin freebsd netbsd openbsd + +package mtree + +import ( + "archive/tar" + "fmt" + "io" + "os" + "os/user" + "syscall" +) + +var ( + flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + // ideally this will pull in from here https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2 + return nil, nil + } + + unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return 
[]KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil + } + + stat := info.Sys().(*syscall.Stat_t) + u, err := user.LookupId(fmt.Sprintf("%d", stat.Uid)) + if err != nil { + return nil, err + } + return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", u.Username))}, nil + } + gnameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil + } + + stat := info.Sys().(*syscall.Stat_t) + g, err := lookupGroupID(fmt.Sprintf("%d", stat.Gid)) + if err != nil { + return nil, err + } + return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", g.Name))}, nil + } + uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil + } + stat := info.Sys().(*syscall.Stat_t) + return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", stat.Uid))}, nil + } + gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil + } + if stat, ok := info.Sys().(*syscall.Stat_t); ok { + return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", stat.Gid))}, nil + } + return nil, nil + } + nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if stat, ok := info.Sys().(*syscall.Stat_t); ok { + return []KeyVal{KeyVal(fmt.Sprintf("nlink=%d", stat.Nlink))}, nil + } + return nil, nil + } + xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + return nil, nil + } +) diff --git a/vendor/github.com/vbatts/go-mtree/keywordfuncs_linux.go b/vendor/github.com/vbatts/go-mtree/keywordfuncs_linux.go new file mode 100644 index 0000000000..2fd82c21cd --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/keywordfuncs_linux.go @@ -0,0 +1,107 @@ +// +build linux + +package mtree + 
+import ( + "archive/tar" + "encoding/base64" + "fmt" + "io" + "os" + "os/user" + "syscall" + + "github.com/vbatts/go-mtree/pkg/govis" + "github.com/vbatts/go-mtree/xattr" +) + +var ( + // this is bsd specific https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2 + flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + return nil, nil + } + + unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil + } + + stat := info.Sys().(*syscall.Stat_t) + u, err := user.LookupId(fmt.Sprintf("%d", stat.Uid)) + if err != nil { + return nil, nil + } + return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", u.Username))}, nil + } + gnameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil + } + + stat := info.Sys().(*syscall.Stat_t) + g, err := lookupGroupID(fmt.Sprintf("%d", stat.Gid)) + if err != nil { + return nil, nil + } + return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", g.Name))}, nil + } + uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil + } + stat := info.Sys().(*syscall.Stat_t) + return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", stat.Uid))}, nil + } + gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil + } + if stat, ok := info.Sys().(*syscall.Stat_t); ok { + return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", stat.Gid))}, nil + } + return nil, nil + } + nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if stat, ok := 
info.Sys().(*syscall.Stat_t); ok { + return []KeyVal{KeyVal(fmt.Sprintf("nlink=%d", stat.Nlink))}, nil + } + return nil, nil + } + xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + if len(hdr.Xattrs) == 0 { + return nil, nil + } + klist := []KeyVal{} + for k, v := range hdr.Xattrs { + encKey, err := govis.Vis(k, DefaultVisFlags) + if err != nil { + return nil, nil + } + klist = append(klist, KeyVal(fmt.Sprintf("xattr.%s=%s", encKey, base64.StdEncoding.EncodeToString([]byte(v))))) + } + return klist, nil + } + if !info.Mode().IsRegular() && !info.Mode().IsDir() { + return nil, nil + } + + xlist, err := xattr.List(path) + if err != nil { + return nil, nil + } + klist := make([]KeyVal, len(xlist)) + for i := range xlist { + data, err := xattr.Get(path, xlist[i]) + if err != nil { + return nil, nil + } + encKey, err := govis.Vis(xlist[i], DefaultVisFlags) + if err != nil { + return nil, nil + } + klist[i] = KeyVal(fmt.Sprintf("xattr.%s=%s", encKey, base64.StdEncoding.EncodeToString(data))) + } + return klist, nil + } +) diff --git a/vendor/github.com/vbatts/go-mtree/keywordfuncs_unsupported.go b/vendor/github.com/vbatts/go-mtree/keywordfuncs_unsupported.go new file mode 100644 index 0000000000..1284895dab --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/keywordfuncs_unsupported.go @@ -0,0 +1,47 @@ +// +build !linux,!darwin,!freebsd,!netbsd,!openbsd + +package mtree + +import ( + "archive/tar" + "fmt" + "io" + "os" +) + +var ( + // this is bsd specific https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2 + flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + return nil, nil + } + unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil + } + return nil, nil + } + gnameKeywordFunc = func(path 
string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil + } + return nil, nil + } + uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil + } + return nil, nil + } + gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + if hdr, ok := info.Sys().(*tar.Header); ok { + return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil + } + return nil, nil + } + nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + return nil, nil + } + xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) { + return nil, nil + } +) diff --git a/vendor/github.com/vbatts/go-mtree/keywords.go b/vendor/github.com/vbatts/go-mtree/keywords.go new file mode 100644 index 0000000000..4e9c36190c --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/keywords.go @@ -0,0 +1,327 @@ +package mtree + +import ( + "fmt" + "strings" + + "github.com/vbatts/go-mtree/pkg/govis" +) + +// DefaultVisFlags is the set of Vis flags used when encoding filenames and +// other similar entries. +const DefaultVisFlags govis.VisFlag = govis.VisWhite | govis.VisOctal | govis.VisGlob + +// Keyword is the string name of a keyword, with some convenience functions for +// determining whether it is a default or bsd standard keyword. +// It first portion before the "=" +type Keyword string + +// Prefix is the portion of the keyword before a first "." (if present). +// +// Primarly for the xattr use-case, where the keyword `xattr.security.selinux` would have a Suffix of `security.selinux`. 
+func (k Keyword) Prefix() Keyword { + if strings.Contains(string(k), ".") { + return Keyword(strings.SplitN(string(k), ".", 2)[0]) + } + return k +} + +// Suffix is the portion of the keyword after a first ".". +// This is an option feature. +// +// Primarly for the xattr use-case, where the keyword `xattr.security.selinux` would have a Suffix of `security.selinux`. +func (k Keyword) Suffix() string { + if strings.Contains(string(k), ".") { + return strings.SplitN(string(k), ".", 2)[1] + } + return string(k) +} + +// Default returns whether this keyword is in the default set of keywords +func (k Keyword) Default() bool { + return InKeywordSlice(k, DefaultKeywords) +} + +// Bsd returns whether this keyword is in the upstream FreeBSD mtree(8) +func (k Keyword) Bsd() bool { + return InKeywordSlice(k, BsdKeywords) +} + +// Synonym returns the canonical name for this keyword. This is provides the +// same functionality as KeywordSynonym() +func (k Keyword) Synonym() Keyword { + return KeywordSynonym(string(k)) +} + +// InKeywordSlice checks for the presence of `a` in `list` +func InKeywordSlice(a Keyword, list []Keyword) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} +func inKeyValSlice(a KeyVal, list []KeyVal) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +// ToKeywords makes a list of Keyword from a list of string +func ToKeywords(list []string) []Keyword { + ret := make([]Keyword, len(list)) + for i := range list { + ret[i] = Keyword(list[i]) + } + return ret +} + +// FromKeywords makes a list of string from a list of Keyword +func FromKeywords(list []Keyword) []string { + ret := make([]string, len(list)) + for i := range list { + ret[i] = string(list[i]) + } + return ret +} + +// KeyValToString constructs a list of string from the list of KeyVal +func KeyValToString(list []KeyVal) []string { + ret := make([]string, len(list)) + for i := range list { + ret[i] = string(list[i]) + } + 
return ret +} + +// StringToKeyVals constructs a list of KeyVal from the list of strings, like "keyword=value" +func StringToKeyVals(list []string) []KeyVal { + ret := make([]KeyVal, len(list)) + for i := range list { + ret[i] = KeyVal(list[i]) + } + return ret +} + +// KeyVal is a "keyword=value" +type KeyVal string + +// Keyword is the mapping to the available keywords +func (kv KeyVal) Keyword() Keyword { + if !strings.Contains(string(kv), "=") { + return Keyword("") + } + return Keyword(strings.SplitN(strings.TrimSpace(string(kv)), "=", 2)[0]) +} + +// Value is the data/value portion of "keyword=value" +func (kv KeyVal) Value() string { + if !strings.Contains(string(kv), "=") { + return "" + } + return strings.SplitN(strings.TrimSpace(string(kv)), "=", 2)[1] +} + +// NewValue returns a new KeyVal with the newval +func (kv KeyVal) NewValue(newval string) KeyVal { + return KeyVal(fmt.Sprintf("%s=%s", kv.Keyword(), newval)) +} + +// Equal returns whether two KeyVal are equivalent. This takes +// care of certain odd cases such as tar_mtime, and should be used over +// using == comparisons directly unless you really know what you're +// doing. +func (kv KeyVal) Equal(b KeyVal) bool { + // TODO: Implement handling of tar_mtime. 
+ return kv.Keyword() == b.Keyword() && kv.Value() == b.Value() +} + +func keywordPrefixes(kvset []Keyword) []Keyword { + kvs := []Keyword{} + for _, kv := range kvset { + kvs = append(kvs, kv.Prefix()) + } + return kvs +} + +// keyvalSelector takes an array of KeyVal ("keyword=value") and filters out +// that only the set of keywords +func keyvalSelector(keyval []KeyVal, keyset []Keyword) []KeyVal { + retList := []KeyVal{} + for _, kv := range keyval { + if InKeywordSlice(kv.Keyword().Prefix(), keywordPrefixes(keyset)) { + retList = append(retList, kv) + } + } + return retList +} + +func keyValDifference(this, that []KeyVal) []KeyVal { + if len(this) == 0 { + return that + } + diff := []KeyVal{} + for _, kv := range this { + if !inKeyValSlice(kv, that) { + diff = append(diff, kv) + } + } + return diff +} +func keyValCopy(set []KeyVal) []KeyVal { + ret := make([]KeyVal, len(set)) + for i := range set { + ret[i] = set[i] + } + return ret +} + +// Has the "keyword" present in the list of KeyVal, and returns the +// corresponding KeyVal, else an empty string. +func Has(keyvals []KeyVal, keyword string) []KeyVal { + return HasKeyword(keyvals, Keyword(keyword)) +} + +// HasKeyword the "keyword" present in the list of KeyVal, and returns the +// corresponding KeyVal, else an empty string. +// This match is done on the Prefix of the keyword only. +func HasKeyword(keyvals []KeyVal, keyword Keyword) []KeyVal { + kvs := []KeyVal{} + for i := range keyvals { + if keyvals[i].Keyword().Prefix() == keyword.Prefix() { + kvs = append(kvs, keyvals[i]) + } + } + return kvs +} + +// MergeSet takes the current setKeyVals, and then applies the entryKeyVals +// such that the entry's values win. The union is returned. 
+func MergeSet(setKeyVals, entryKeyVals []string) []KeyVal { + retList := StringToKeyVals(setKeyVals) + eKVs := StringToKeyVals(entryKeyVals) + return MergeKeyValSet(retList, eKVs) +} + +// MergeKeyValSet does a merge of the two sets of KeyVal, and the KeyVal of +// entryKeyVals win when there is a duplicate Keyword. +func MergeKeyValSet(setKeyVals, entryKeyVals []KeyVal) []KeyVal { + retList := keyValCopy(setKeyVals) + seenKeywords := []Keyword{} + for i := range retList { + word := retList[i].Keyword() + for _, kv := range HasKeyword(entryKeyVals, word) { + // match on the keyword prefix and suffix here + if kv.Keyword() == word { + retList[i] = kv + } + } + seenKeywords = append(seenKeywords, word) + } + for i := range entryKeyVals { + if !InKeywordSlice(entryKeyVals[i].Keyword(), seenKeywords) { + retList = append(retList, entryKeyVals[i]) + } + } + return retList +} + +var ( + // DefaultKeywords has the several default keyword producers (uid, gid, + // mode, nlink, type, size, mtime) + DefaultKeywords = []Keyword{ + "size", + "type", + "uid", + "gid", + "mode", + "link", + "nlink", + "time", + } + + // DefaultTarKeywords has keywords that should be used when creating a manifest from + // an archive. Currently, evaluating the # of hardlinks has not been implemented yet + DefaultTarKeywords = []Keyword{ + "size", + "type", + "uid", + "gid", + "mode", + "link", + "tar_time", + } + + // BsdKeywords is the set of keywords that is only in the upstream FreeBSD mtree + BsdKeywords = []Keyword{ + "cksum", + "flags", // this one is really mostly BSD specific ... 
+ "ignore", + "gid", + "gname", + "link", + "md5", + "md5digest", + "mode", + "nlink", + "nochange", + "optional", + "ripemd160digest", + "rmd160", + "rmd160digest", + "sha1", + "sha1digest", + "sha256", + "sha256digest", + "sha384", + "sha384digest", + "sha512", + "sha512digest", + "size", + "tags", + "time", + "type", + "uid", + "uname", + } + + // SetKeywords is the default set of keywords calculated for a `/set` SpecialType + SetKeywords = []Keyword{ + "uid", + "gid", + } +) + +// KeywordSynonym returns the canonical name for keywords that have synonyms, +// and just returns the name provided if there is no synonym. In this way it +// ought to be safe to wrap any keyword name. +func KeywordSynonym(name string) Keyword { + var retname string + switch name { + case "md5": + retname = "md5digest" + case "rmd160": + retname = "ripemd160digest" + case "rmd160digest": + retname = "ripemd160digest" + case "sha1": + retname = "sha1digest" + case "sha256": + retname = "sha256digest" + case "sha384": + retname = "sha384digest" + case "sha512": + retname = "sha512digest" + case "sha512256": + retname = "sha512256digest" + case "xattrs": + retname = "xattr" + default: + retname = name + } + return Keyword(retname) +} diff --git a/vendor/github.com/vbatts/go-mtree/lchtimes_unix.go b/vendor/github.com/vbatts/go-mtree/lchtimes_unix.go new file mode 100644 index 0000000000..7cb5300b83 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/lchtimes_unix.go @@ -0,0 +1,22 @@ +// +build darwin dragonfly freebsd openbsd linux netbsd solaris + +package mtree + +import ( + "os" + "time" + + "golang.org/x/sys/unix" +) + +func lchtimes(name string, atime time.Time, mtime time.Time) error { + utimes := []unix.Timespec{ + unix.NsecToTimespec(atime.UnixNano()), + unix.NsecToTimespec(mtime.UnixNano()), + } + if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes, unix.AT_SYMLINK_NOFOLLOW); e != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: e} + } + return nil + +} diff --git 
a/vendor/github.com/vbatts/go-mtree/lchtimes_unsupported.go b/vendor/github.com/vbatts/go-mtree/lchtimes_unsupported.go new file mode 100644 index 0000000000..fac053256d --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/lchtimes_unsupported.go @@ -0,0 +1,11 @@ +// +build windows + +package mtree + +import ( + "time" +) + +func lchtimes(name string, atime time.Time, mtime time.Time) error { + return nil +} diff --git a/vendor/github.com/vbatts/go-mtree/lookup_new.go b/vendor/github.com/vbatts/go-mtree/lookup_new.go new file mode 100644 index 0000000000..c8baae7a6f --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/lookup_new.go @@ -0,0 +1,9 @@ +// +build go1.7 + +package mtree + +import ( + "os/user" +) + +var lookupGroupID = user.LookupGroupId diff --git a/vendor/github.com/vbatts/go-mtree/lookup_old.go b/vendor/github.com/vbatts/go-mtree/lookup_old.go new file mode 100644 index 0000000000..8c22e2b5e8 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/lookup_old.go @@ -0,0 +1,102 @@ +// +build !go1.7 + +package mtree + +import ( + "bufio" + "bytes" + "io" + "os" + "strconv" + "strings" +) + +const groupFile = "/etc/group" + +var colon = []byte{':'} + +// Group represents a grouping of users. +// +// On POSIX systems Gid contains a decimal number representing the group ID. +type Group struct { + Gid string // group ID + Name string // group name +} + +func lookupGroupID(id string) (*Group, error) { + f, err := os.Open(groupFile) + if err != nil { + return nil, err + } + defer f.Close() + return findGroupID(id, f) +} + +func findGroupID(id string, r io.Reader) (*Group, error) { + if v, err := readColonFile(r, matchGroupIndexValue(id, 2)); err != nil { + return nil, err + } else if v != nil { + return v.(*Group), nil + } + return nil, UnknownGroupIDError(id) +} + +// lineFunc returns a value, an error, or (nil, nil) to skip the row. 
+type lineFunc func(line []byte) (v interface{}, err error) + +// readColonFile parses r as an /etc/group or /etc/passwd style file, running +// fn for each row. readColonFile returns a value, an error, or (nil, nil) if +// the end of the file is reached without a match. +func readColonFile(r io.Reader, fn lineFunc) (v interface{}, err error) { + bs := bufio.NewScanner(r) + for bs.Scan() { + line := bs.Bytes() + // There's no spec for /etc/passwd or /etc/group, but we try to follow + // the same rules as the glibc parser, which allows comments and blank + // space at the beginning of a line. + line = bytes.TrimSpace(line) + if len(line) == 0 || line[0] == '#' { + continue + } + v, err = fn(line) + if v != nil || err != nil { + return + } + } + return nil, bs.Err() +} + +func matchGroupIndexValue(value string, idx int) lineFunc { + var leadColon string + if idx > 0 { + leadColon = ":" + } + substr := []byte(leadColon + value + ":") + return func(line []byte) (v interface{}, err error) { + if !bytes.Contains(line, substr) || bytes.Count(line, colon) < 3 { + return + } + // wheel:*:0:root + parts := strings.SplitN(string(line), ":", 4) + if len(parts) < 4 || parts[0] == "" || parts[idx] != value || + // If the file contains +foo and you search for "foo", glibc + // returns an "invalid argument" error. Similarly, if you search + // for a gid for a row where the group name starts with "+" or "-", + // glibc fails to find the record. + parts[0][0] == '+' || parts[0][0] == '-' { + return + } + if _, err := strconv.Atoi(parts[2]); err != nil { + return nil, nil + } + return &Group{Name: parts[0], Gid: parts[2]}, nil + } +} + +// UnknownGroupIDError is returned by LookupGroupId when +// a group cannot be found. 
+type UnknownGroupIDError string + +func (e UnknownGroupIDError) Error() string { + return "group: unknown groupid " + string(e) +} diff --git a/vendor/github.com/vbatts/go-mtree/parse.go b/vendor/github.com/vbatts/go-mtree/parse.go new file mode 100644 index 0000000000..36a7163bc5 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/parse.go @@ -0,0 +1,105 @@ +package mtree + +import ( + "bufio" + "io" + "path/filepath" + "strings" +) + +// ParseSpec reads a stream of an mtree specification, and returns the DirectoryHierarchy +func ParseSpec(r io.Reader) (*DirectoryHierarchy, error) { + s := bufio.NewScanner(r) + i := int(0) + creator := dhCreator{ + DH: &DirectoryHierarchy{}, + } + for s.Scan() { + str := s.Text() + trimmedStr := strings.TrimLeftFunc(str, func(c rune) bool { + return c == ' ' || c == '\t' + }) + e := Entry{Pos: i} + switch { + case strings.HasPrefix(trimmedStr, "#"): + e.Raw = str + if strings.HasPrefix(trimmedStr, "#mtree") { + e.Type = SignatureType + } else { + e.Type = CommentType + // from here, the comment could be "# key: value" metadata + // or a relative path hint + } + case str == "": + e.Type = BlankType + // nothing else to do here + case strings.HasPrefix(str, "/"): + e.Type = SpecialType + // collapse any escaped newlines + for { + if strings.HasSuffix(str, `\`) { + str = str[:len(str)-1] + s.Scan() + str += s.Text() + } else { + break + } + } + // parse the options + f := strings.Fields(str) + e.Name = f[0] + e.Keywords = StringToKeyVals(f[1:]) + if e.Name == "/set" { + creator.curSet = &e + } else if e.Name == "/unset" { + creator.curSet = nil + } + case len(strings.Fields(str)) > 0 && strings.Fields(str)[0] == "..": + e.Type = DotDotType + e.Raw = str + if creator.curDir != nil { + creator.curDir = creator.curDir.Parent + } + // nothing else to do here + case len(strings.Fields(str)) > 0: + // collapse any escaped newlines + for { + if strings.HasSuffix(str, `\`) { + str = str[:len(str)-1] + s.Scan() + str += s.Text() + } else { 
+ break + } + } + // parse the options + f := strings.Fields(str) + e.Name = filepath.Clean(f[0]) + if strings.Contains(e.Name, "/") { + e.Type = FullType + } else { + e.Type = RelativeType + } + e.Keywords = StringToKeyVals(f[1:]) + // TODO: gather keywords if using tar stream + e.Parent = creator.curDir + for i := range e.Keywords { + kv := KeyVal(e.Keywords[i]) + if kv.Keyword() == "type" { + if kv.Value() == "dir" { + creator.curDir = &e + } else { + creator.curEnt = &e + } + } + } + e.Set = creator.curSet + default: + // TODO(vbatts) log a warning? + continue + } + creator.DH.Entries = append(creator.DH.Entries, e) + i++ + } + return creator.DH, s.Err() +} diff --git a/vendor/github.com/vbatts/go-mtree/pkg/govis/COPYING b/vendor/github.com/vbatts/go-mtree/pkg/govis/COPYING new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/pkg/govis/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/vbatts/go-mtree/pkg/govis/README.md b/vendor/github.com/vbatts/go-mtree/pkg/govis/README.md new file mode 100644 index 0000000000..d56e8da3b2 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/pkg/govis/README.md @@ -0,0 +1,27 @@ +## `govis` ## + +`govis` is a BSD-compatible `vis(3)` and `unvis(3)` encoding implementation +that is unicode aware and written in Go. None of this code comes from the +original BSD code, nor does it come from `go-mtree`'s port of said code. +Because 80s BSD code is not very nice to read. + +### License ### + +`govis` is licensed under the Apache 2.0 license. + +``` +govis: unicode aware vis(3) encoding implementation +Copyright (C) 2017 SUSE LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+``` diff --git a/vendor/github.com/vbatts/go-mtree/pkg/govis/govis.go b/vendor/github.com/vbatts/go-mtree/pkg/govis/govis.go new file mode 100644 index 0000000000..9888c27670 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/pkg/govis/govis.go @@ -0,0 +1,39 @@ +/* + * govis: unicode aware vis(3) encoding implementation + * Copyright (C) 2017 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package govis + +// VisFlag manipulates how the characters are encoded/decoded +type VisFlag uint + +// vis() has a variety of flags when deciding what encodings to use. While +// mtree only uses one set of flags, implementing them all is necessary in +// order to have compatibility with BSD's vis() and unvis() commands. +const ( + VisOctal VisFlag = (1 << iota) // VIS_OCTAL: Use octal \ddd format. + VisCStyle // VIS_CSTYLE: Use \[nrft0..] where appropriate. + VisSpace // VIS_SP: Also encode space. + VisTab // VIS_TAB: Also encode tab. + VisNewline // VIS_NL: Also encode newline. + VisSafe // VIS_SAFE: Encode unsafe characters. + VisNoSlash // VIS_NOSLASH: Inhibit printing '\'. + VisHTTPStyle // VIS_HTTPSTYLE: HTTP-style escape %xx. + VisGlob // VIS_GLOB: Encode glob(3) magics. + visMask VisFlag = (1 << iota) - 1 // Mask of all flags. 
+ + VisWhite VisFlag = (VisSpace | VisTab | VisNewline) +) diff --git a/vendor/github.com/vbatts/go-mtree/pkg/govis/unvis.go b/vendor/github.com/vbatts/go-mtree/pkg/govis/unvis.go new file mode 100644 index 0000000000..8a26218588 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/pkg/govis/unvis.go @@ -0,0 +1,294 @@ +/* + * govis: unicode aware vis(3) encoding implementation + * Copyright (C) 2017 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package govis + +import ( + "fmt" + "strconv" + "unicode" +) + +// unvisParser stores the current state of the token parser. +type unvisParser struct { + tokens []rune + idx int + flag VisFlag +} + +// Next moves the index to the next character. +func (p *unvisParser) Next() { + p.idx++ +} + +// Peek gets the current token. +func (p *unvisParser) Peek() (rune, error) { + if p.idx >= len(p.tokens) { + return unicode.ReplacementChar, fmt.Errorf("tried to read past end of token list") + } + return p.tokens[p.idx], nil +} + +// End returns whether all of the tokens have been consumed. +func (p *unvisParser) End() bool { + return p.idx >= len(p.tokens) +} + +func newParser(input string, flag VisFlag) *unvisParser { + return &unvisParser{ + tokens: []rune(input), + idx: 0, + flag: flag, + } +} + +// While a recursive descent parser is overkill for parsing simple escape +// codes, this is IMO much easier to read than the ugly 80s coroutine code used +// by the original unvis(3) parser. 
Here's the EBNF for an unvis sequence: +// +// ::= ()* +// ::= ("\" ) | ("%" ) | +// ::= any rune +// ::= ("x" ) | ("M" ) | ("^" | +// ::= ("-" ) | ("^" ) +// ::= any rune +// ::= "?" | any rune +// ::= "\" | "n" | "r" | "b" | "a" | "v" | "t" | "f" +// ::= [0-9a-f] [0-9a-f] +// ::= [0-7] ([0-7] ([0-7])?)? + +func unvisPlainRune(p *unvisParser) ([]byte, error) { + ch, err := p.Peek() + if err != nil { + return nil, fmt.Errorf("plain rune: %c", ch) + } + p.Next() + + // XXX: Maybe we should not be converting to runes and then back to strings + // here. Are we sure that the byte-for-byte representation is the + // same? If the bytes change, then using these strings for paths will + // break... + + str := string(ch) + return []byte(str), nil +} + +func unvisEscapeCStyle(p *unvisParser) ([]byte, error) { + ch, err := p.Peek() + if err != nil { + return nil, fmt.Errorf("escape hex: %s", err) + } + + output := "" + switch ch { + case 'n': + output = "\n" + case 'r': + output = "\r" + case 'b': + output = "\b" + case 'a': + output = "\x07" + case 'v': + output = "\v" + case 't': + output = "\t" + case 'f': + output = "\f" + case 's': + output = " " + case 'E': + output = "\x1b" + case '\n': + // Hidden newline. + case '$': + // Hidden marker. + default: + // XXX: We should probably allow falling through and return "\" here... 
+ return nil, fmt.Errorf("escape cstyle: unknown escape character: %q", ch) + } + + p.Next() + return []byte(output), nil +} + +func unvisEscapeDigits(p *unvisParser, base int, force bool) ([]byte, error) { + var code int + + for i := int(0xFF); i > 0; i /= base { + ch, err := p.Peek() + if err != nil { + if !force && i != 0xFF { + break + } + return nil, fmt.Errorf("escape base %d: %s", base, err) + } + + digit, err := strconv.ParseInt(string(ch), base, 8) + if err != nil { + if !force && i != 0xFF { + break + } + return nil, fmt.Errorf("escape base %d: could not parse digit: %s", base, err) + } + + code = (code * base) + int(digit) + p.Next() + } + + if code > unicode.MaxLatin1 { + return nil, fmt.Errorf("escape base %d: code %q outside latin-1 encoding", base, code) + } + + char := byte(code & 0xFF) + return []byte{char}, nil +} + +func unvisEscapeCtrl(p *unvisParser, mask byte) ([]byte, error) { + ch, err := p.Peek() + if err != nil { + return nil, fmt.Errorf("escape ctrl: %s", err) + } + if ch > unicode.MaxLatin1 { + return nil, fmt.Errorf("escape ctrl: code %q outside latin-1 encoding", ch) + } + + char := byte(ch) & 0x1f + if ch == '?' { + char = 0x7f + } + + p.Next() + return []byte{mask | char}, nil +} + +func unvisEscapeMeta(p *unvisParser) ([]byte, error) { + ch, err := p.Peek() + if err != nil { + return nil, fmt.Errorf("escape meta: %s", err) + } + + mask := byte(0x80) + + switch ch { + case '^': + // The same as "\^..." except we apply a mask. + p.Next() + return unvisEscapeCtrl(p, mask) + + case '-': + p.Next() + + ch, err := p.Peek() + if err != nil { + return nil, fmt.Errorf("escape meta1: %s", err) + } + if ch > unicode.MaxLatin1 { + return nil, fmt.Errorf("escape meta1: code %q outside latin-1 encoding", ch) + } + + // Add mask to character. 
+ p.Next() + return []byte{mask | byte(ch)}, nil + } + + return nil, fmt.Errorf("escape meta: unknown escape char: %s", err) +} + +func unvisEscapeSequence(p *unvisParser) ([]byte, error) { + ch, err := p.Peek() + if err != nil { + return nil, fmt.Errorf("escape sequence: %s", err) + } + + switch ch { + case '\\': + p.Next() + return []byte("\\"), nil + + case '0', '1', '2', '3', '4', '5', '6', '7': + return unvisEscapeDigits(p, 8, false) + + case 'x': + p.Next() + return unvisEscapeDigits(p, 16, true) + + case '^': + p.Next() + return unvisEscapeCtrl(p, 0x00) + + case 'M': + p.Next() + return unvisEscapeMeta(p) + + default: + return unvisEscapeCStyle(p) + } +} + +func unvisRune(p *unvisParser) ([]byte, error) { + ch, err := p.Peek() + if err != nil { + return nil, fmt.Errorf("rune: %s", err) + } + + switch ch { + case '\\': + p.Next() + return unvisEscapeSequence(p) + + case '%': + // % HEX HEX only applies to HTTPStyle encodings. + if p.flag&VisHTTPStyle == VisHTTPStyle { + p.Next() + return unvisEscapeDigits(p, 16, true) + } + fallthrough + + default: + return unvisPlainRune(p) + } +} + +func unvis(p *unvisParser) (string, error) { + var output []byte + for !p.End() { + ch, err := unvisRune(p) + if err != nil { + return "", fmt.Errorf("input: %s", err) + } + output = append(output, ch...) + } + return string(output), nil +} + +// Unvis takes a string formatted with the given Vis flags (though only the +// VisHTTPStyle flag is checked) and output the un-encoded version of the +// encoded string. An error is returned if any escape sequences in the input +// string were invalid. +func Unvis(input string, flag VisFlag) (string, error) { + // TODO: Check all of the VisFlag bits. 
+ p := newParser(input, flag) + output, err := unvis(p) + if err != nil { + return "", fmt.Errorf("unvis: %s", err) + } + if !p.End() { + return "", fmt.Errorf("unvis: trailing characters at end of input") + } + return output, nil +} diff --git a/vendor/github.com/vbatts/go-mtree/pkg/govis/vis.go b/vendor/github.com/vbatts/go-mtree/pkg/govis/vis.go new file mode 100644 index 0000000000..140556a658 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/pkg/govis/vis.go @@ -0,0 +1,177 @@ +/* + * govis: unicode aware vis(3) encoding implementation + * Copyright (C) 2017 SUSE LLC. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package govis + +import ( + "fmt" + "unicode" +) + +func isunsafe(ch rune) bool { + return ch == '\b' || ch == '\007' || ch == '\r' +} + +func isglob(ch rune) bool { + return ch == '*' || ch == '?' || ch == '[' || ch == '#' +} + +// ishttp is defined by RFC 1808. +func ishttp(ch rune) bool { + // RFC1808 does not really consider characters outside of ASCII, so just to + // be safe always treat characters outside the ASCII character set as "not + // HTTP". + if ch > unicode.MaxASCII { + return false + } + + return unicode.IsDigit(ch) || unicode.IsLetter(ch) || + // Safe characters. + ch == '$' || ch == '-' || ch == '_' || ch == '.' || ch == '+' || + // Extra characters. + ch == '!' 
|| ch == '*' || ch == '\'' || ch == '(' || + ch == ')' || ch == ',' +} + +func isgraph(ch rune) bool { + return unicode.IsGraphic(ch) && !unicode.IsSpace(ch) && ch <= unicode.MaxASCII +} + +// vis converts a single *byte* into its encoding. While Go supports the +// concept of runes (and thus native utf-8 parsing), in order to make sure that +// the bit-stream will be completely maintained through an Unvis(Vis(...)) +// round-trip. The downside is that Vis() will never output unicode -- but on +// the plus side this is actually a benefit on the encoding side (it will +// always work with the simple unvis(3) implementation). It also means that we +// don't have to worry about different multi-byte encodings. +func vis(b byte, flag VisFlag) (string, error) { + // Treat the single-byte character as a rune. + ch := rune(b) + + // XXX: This is quite a horrible thing to support. + if flag&VisHTTPStyle == VisHTTPStyle { + if !ishttp(ch) { + return "%" + fmt.Sprintf("%.2X", ch), nil + } + } + + // Figure out if the character doesn't need to be encoded. Effectively, we + // encode most "normal" (graphical) characters as themselves unless we have + // been specifically asked not to. Note though that we *ALWAYS* encode + // everything outside ASCII. + // TODO: Switch this to much more logical code. + + if ch > unicode.MaxASCII { + /* ... */ + } else if flag&VisGlob == VisGlob && isglob(ch) { + /* ... */ + } else if isgraph(ch) || + (flag&VisSpace != VisSpace && ch == ' ') || + (flag&VisTab != VisTab && ch == '\t') || + (flag&VisNewline != VisNewline && ch == '\n') || + (flag&VisSafe != 0 && isunsafe(ch)) { + + encoded := string(ch) + if ch == '\\' && flag&VisNoSlash == 0 { + encoded += "\\" + } + return encoded, nil + } + + // Try to use C-style escapes first. 
+ if flag&VisCStyle == VisCStyle { + switch ch { + case ' ': + return "\\s", nil + case '\n': + return "\\n", nil + case '\r': + return "\\r", nil + case '\b': + return "\\b", nil + case '\a': + return "\\a", nil + case '\v': + return "\\v", nil + case '\t': + return "\\t", nil + case '\f': + return "\\f", nil + case '\x00': + // Output octal just to be safe. + return "\\000", nil + } + } + + // For graphical characters we generate octal output (and also if it's + // being forced by the caller's flags). Also spaces should always be + // encoded as octal. + if flag&VisOctal == VisOctal || isgraph(ch) || ch&0x7f == ' ' { + // Always output three-character octal just to be safe. + return fmt.Sprintf("\\%.3o", ch), nil + } + + // Now we have to output meta or ctrl escapes. As far as I can tell, this + // is not actually defined by any standard -- so this logic is basically + // copied from the original vis(3) implementation. Hopefully nobody + // actually relies on this (octal and hex are better). + + encoded := "" + if flag&VisNoSlash == 0 { + encoded += "\\" + } + + // Meta characters have 0x80 set, but are otherwise identical to control + // characters. + if b&0x80 != 0 { + b &= 0x7f + encoded += "M" + } + + if unicode.IsControl(rune(b)) { + encoded += "^" + if b == 0x7f { + encoded += "?" + } else { + encoded += fmt.Sprintf("%c", b+'@') + } + } else { + encoded += fmt.Sprintf("-%c", b) + } + + return encoded, nil +} + +// Vis encodes the provided string to a BSD-compatible encoding using BSD's +// vis() flags. However, it will correctly handle multi-byte encoding (which is +// not done properly by BSD's vis implementation). 
+func Vis(src string, flag VisFlag) (string, error) { + if flag&visMask != flag { + return "", fmt.Errorf("vis: flag %q contains unknown or unsupported flags", flag) + } + + output := "" + for _, ch := range []byte(src) { + encodedCh, err := vis(ch, flag) + if err != nil { + return "", err + } + output += encodedCh + } + + return output, nil +} diff --git a/vendor/github.com/vbatts/go-mtree/releases.md b/vendor/github.com/vbatts/go-mtree/releases.md new file mode 100644 index 0000000000..89ee97e471 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/releases.md @@ -0,0 +1,11 @@ +# How to do releases: + +* Create a changeset with an update to `version.go` + - this commit will be tagged + - add another commit putting it back with '-dev' appended +* gpg sign the commit with an incremented version, like 'vX.Y.Z' +* Push the tag +* Create a "release" from the tag on github + - include the binaries from `make build.arches` + - write about notable changes, and their contributors + - PRs merged for the release diff --git a/vendor/github.com/vbatts/go-mtree/stat_unix.go b/vendor/github.com/vbatts/go-mtree/stat_unix.go new file mode 100644 index 0000000000..9b87eb6f18 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/stat_unix.go @@ -0,0 +1,18 @@ +// +build !windows + +package mtree + +import ( + "os" + "syscall" +) + +func statIsUID(stat os.FileInfo, uid int) bool { + statT := stat.Sys().(*syscall.Stat_t) + return statT.Uid == uint32(uid) +} + +func statIsGID(stat os.FileInfo, gid int) bool { + statT := stat.Sys().(*syscall.Stat_t) + return statT.Gid == uint32(gid) +} diff --git a/vendor/github.com/vbatts/go-mtree/stat_windows.go b/vendor/github.com/vbatts/go-mtree/stat_windows.go new file mode 100644 index 0000000000..34eb28e83a --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/stat_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package mtree + +import "os" + +func statIsUID(stat os.FileInfo, uid int) bool { + return false +} +func statIsGID(stat os.FileInfo, 
uid int) bool { + return false +} diff --git a/vendor/github.com/vbatts/go-mtree/tar.go b/vendor/github.com/vbatts/go-mtree/tar.go new file mode 100644 index 0000000000..51e251a060 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/tar.go @@ -0,0 +1,461 @@ +package mtree + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/vbatts/go-mtree/pkg/govis" +) + +// Streamer creates a file hierarchy out of a tar stream +type Streamer interface { + io.ReadCloser + Hierarchy() (*DirectoryHierarchy, error) +} + +var tarDefaultSetKeywords = []KeyVal{ + "type=file", + "flags=none", + "mode=0664", +} + +// NewTarStreamer streams a tar archive and creates a file hierarchy based off +// of the tar metadata headers +func NewTarStreamer(r io.Reader, excludes []ExcludeFunc, keywords []Keyword) Streamer { + pR, pW := io.Pipe() + ts := &tarStream{ + pipeReader: pR, + pipeWriter: pW, + creator: dhCreator{DH: &DirectoryHierarchy{}}, + teeReader: io.TeeReader(r, pW), + tarReader: tar.NewReader(pR), + keywords: keywords, + hardlinks: map[string][]string{}, + excludes: excludes, + } + + go ts.readHeaders() + return ts +} + +type tarStream struct { + root *Entry + hardlinks map[string][]string + creator dhCreator + pipeReader *io.PipeReader + pipeWriter *io.PipeWriter + teeReader io.Reader + tarReader *tar.Reader + keywords []Keyword + excludes []ExcludeFunc + err error +} + +func (ts *tarStream) readHeaders() { + // remove "time" keyword + notimekws := []Keyword{} + for _, kw := range ts.keywords { + if !InKeywordSlice(kw, notimekws) { + if kw == "time" { + if !InKeywordSlice("tar_time", ts.keywords) { + notimekws = append(notimekws, "tar_time") + } + } else { + notimekws = append(notimekws, kw) + } + } + } + ts.keywords = notimekws + // We have to start with the directory we're in, and anything beyond these + // items is determined at the time a tar is extracted. 
+ ts.root = &Entry{ + Name: ".", + Type: RelativeType, + Prev: &Entry{ + Raw: "# .", + Type: CommentType, + }, + Set: nil, + Keywords: []KeyVal{"type=dir"}, + } + // insert signature and metadata comments first (user, machine, tree, date) + for _, e := range signatureEntries("") { + e.Pos = len(ts.creator.DH.Entries) + ts.creator.DH.Entries = append(ts.creator.DH.Entries, e) + } + // insert keyword metadata next + for _, e := range keywordEntries(ts.keywords) { + e.Pos = len(ts.creator.DH.Entries) + ts.creator.DH.Entries = append(ts.creator.DH.Entries, e) + } +hdrloop: + for { + hdr, err := ts.tarReader.Next() + if err != nil { + ts.pipeReader.CloseWithError(err) + return + } + + for _, ex := range ts.excludes { + if ex(hdr.Name, hdr.FileInfo()) { + continue hdrloop + } + } + + // Because the content of the file may need to be read by several + // KeywordFuncs, it needs to be an io.Seeker as well. So, just reading from + // ts.tarReader is not enough. + tmpFile, err := ioutil.TempFile("", "ts.payload.") + if err != nil { + ts.pipeReader.CloseWithError(err) + return + } + // for good measure + if err := tmpFile.Chmod(0600); err != nil { + tmpFile.Close() + os.Remove(tmpFile.Name()) + ts.pipeReader.CloseWithError(err) + return + } + if _, err := io.Copy(tmpFile, ts.tarReader); err != nil { + tmpFile.Close() + os.Remove(tmpFile.Name()) + ts.pipeReader.CloseWithError(err) + return + } + // Alright, it's either file or directory + encodedName, err := govis.Vis(filepath.Base(hdr.Name), DefaultVisFlags) + if err != nil { + tmpFile.Close() + os.Remove(tmpFile.Name()) + ts.pipeReader.CloseWithError(err) + return + } + e := Entry{ + Name: encodedName, + Type: RelativeType, + } + + // Keep track of which files are hardlinks so we can resolve them later + if hdr.Typeflag == tar.TypeLink { + keyFunc := KeywordFuncs["link"] + kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), nil) + if err != nil { + logrus.Warn(err) + break // XXX is breaking an okay thing to do here? 
+ } + linkname, err := govis.Unvis(KeyVal(kvs[0]).Value(), DefaultVisFlags) + if err != nil { + logrus.Warn(err) + break // XXX is breaking an okay thing to do here? + } + if _, ok := ts.hardlinks[linkname]; !ok { + ts.hardlinks[linkname] = []string{hdr.Name} + } else { + ts.hardlinks[linkname] = append(ts.hardlinks[linkname], hdr.Name) + } + } + + // now collect keywords on the file + for _, keyword := range ts.keywords { + if keyFunc, ok := KeywordFuncs[keyword.Prefix()]; ok { + // We can't extract directories on to disk, so "size" keyword + // is irrelevant for now + if hdr.FileInfo().IsDir() && keyword == "size" { + continue + } + kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), tmpFile) + if err != nil { + ts.setErr(err) + } + // for good measure, check that we actually get a value for a keyword + if len(kvs) > 0 && kvs[0] != "" { + e.Keywords = append(e.Keywords, kvs[0]) + } + + // don't forget to reset the reader + if _, err := tmpFile.Seek(0, 0); err != nil { + tmpFile.Close() + os.Remove(tmpFile.Name()) + ts.pipeReader.CloseWithError(err) + return + } + } + } + // collect meta-set keywords for a directory so that we can build the + // actual sets in `flatten` + if hdr.FileInfo().IsDir() { + s := Entry{ + Name: "meta-set", + Type: SpecialType, + } + for _, setKW := range SetKeywords { + if keyFunc, ok := KeywordFuncs[setKW.Prefix()]; ok { + kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), tmpFile) + if err != nil { + ts.setErr(err) + } + for _, kv := range kvs { + if kv != "" { + s.Keywords = append(s.Keywords, kv) + } + } + if _, err := tmpFile.Seek(0, 0); err != nil { + tmpFile.Close() + os.Remove(tmpFile.Name()) + ts.pipeReader.CloseWithError(err) + } + } + } + e.Set = &s + } + err = populateTree(ts.root, &e, hdr) + if err != nil { + ts.setErr(err) + } + tmpFile.Close() + os.Remove(tmpFile.Name()) + } +} + +// populateTree creates a pseudo file tree hierarchy using an Entry's Parent and +// Children fields. 
When examining the Entry e to insert in the tree, we +// determine if the path to that Entry exists yet. If it does, insert it in the +// appropriate position in the tree. If not, create a path up until the Entry's +// directory that it is contained in. Then, insert the Entry. +// root: the "." Entry +// e: the Entry we are looking to insert +// hdr: the tar header struct associated with e +func populateTree(root, e *Entry, hdr *tar.Header) error { + if root == nil || e == nil { + return fmt.Errorf("cannot populate or insert nil Entry's") + } else if root.Prev == nil { + return fmt.Errorf("root needs to be an Entry associated with a directory") + } + isDir := hdr.FileInfo().IsDir() + wd := filepath.Clean(hdr.Name) + if !isDir { + // directory up until the actual file + wd = filepath.Dir(wd) + if wd == "." { + root.Children = append([]*Entry{e}, root.Children...) + e.Parent = root + return nil + } + } + dirNames := strings.Split(wd, "/") + parent := root + for _, name := range dirNames[:] { + encoded, err := govis.Vis(name, DefaultVisFlags) + if err != nil { + return err + } + if node := parent.Descend(encoded); node == nil { + // Entry for directory doesn't exist in tree relative to root. + // We don't know if this directory is an actual tar header (because a + // user could have just specified a path to a deep file), so we must + // specify this placeholder directory as a "type=dir", and Set=nil. + newEntry := Entry{ + Name: encoded, + Type: RelativeType, + Parent: parent, + Keywords: []KeyVal{"type=dir"}, // temp data + Set: nil, // temp data + } + pathname, err := newEntry.Path() + if err != nil { + return err + } + newEntry.Prev = &Entry{ + Type: CommentType, + Raw: "# " + pathname, + } + parent.Children = append(parent.Children, &newEntry) + parent = &newEntry + } else { + // Entry for directory exists in tree, just keep going + parent = node + } + } + if !isDir { + parent.Children = append([]*Entry{e}, parent.Children...) 
+ e.Parent = parent + } else { + // fill in the actual data from e + parent.Keywords = e.Keywords + parent.Set = e.Set + } + return nil +} + +// After constructing a pseudo file hierarchy tree, we want to "flatten" this +// tree by putting the Entries into a slice with appropriate positioning. +// root: the "head" of the sub-tree to flatten +// creator: a dhCreator that helps with the '/set' keyword +// keywords: keywords specified by the user that should be evaluated +func flatten(root *Entry, creator *dhCreator, keywords []Keyword) { + if root == nil || creator == nil { + return + } + if root.Prev != nil { + // root.Prev != nil implies root is a directory + creator.DH.Entries = append(creator.DH.Entries, + Entry{ + Type: BlankType, + Pos: len(creator.DH.Entries), + }) + root.Prev.Pos = len(creator.DH.Entries) + creator.DH.Entries = append(creator.DH.Entries, *root.Prev) + + if root.Set != nil { + // Check if we need a new set + consolidatedKeys := keyvalSelector(append(tarDefaultSetKeywords, root.Set.Keywords...), keywords) + if creator.curSet == nil { + creator.curSet = &Entry{ + Type: SpecialType, + Name: "/set", + Keywords: consolidatedKeys, + Pos: len(creator.DH.Entries), + } + creator.DH.Entries = append(creator.DH.Entries, *creator.curSet) + } else { + needNewSet := false + for _, k := range root.Set.Keywords { + if !inKeyValSlice(k, creator.curSet.Keywords) { + needNewSet = true + break + } + } + if needNewSet { + creator.curSet = &Entry{ + Name: "/set", + Type: SpecialType, + Pos: len(creator.DH.Entries), + Keywords: consolidatedKeys, + } + creator.DH.Entries = append(creator.DH.Entries, *creator.curSet) + } + } + } else if creator.curSet != nil { + // Getting into here implies that the Entry's set has not and + // was not supposed to be evaluated, thus, we need to reset curSet + creator.DH.Entries = append(creator.DH.Entries, Entry{ + Name: "/unset", + Type: SpecialType, + Pos: len(creator.DH.Entries), + }) + creator.curSet = nil + } + } + root.Set = 
creator.curSet + if creator.curSet != nil { + root.Keywords = keyValDifference(root.Keywords, creator.curSet.Keywords) + } + root.Pos = len(creator.DH.Entries) + creator.DH.Entries = append(creator.DH.Entries, *root) + for _, c := range root.Children { + flatten(c, creator, keywords) + } + if root.Prev != nil { + // Show a comment when stepping out + root.Prev.Pos = len(creator.DH.Entries) + creator.DH.Entries = append(creator.DH.Entries, *root.Prev) + dotEntry := Entry{ + Type: DotDotType, + Name: "..", + Pos: len(creator.DH.Entries), + } + creator.DH.Entries = append(creator.DH.Entries, dotEntry) + } + return +} + +// resolveHardlinks goes through an Entry tree, and finds the Entry's associated +// with hardlinks and fills them in with the actual data from the base file. +func resolveHardlinks(root *Entry, hardlinks map[string][]string, countlinks bool) { + originals := make(map[string]*Entry) + for base, links := range hardlinks { + var basefile *Entry + if seen, ok := originals[base]; !ok { + basefile = root.Find(base) + if basefile == nil { + logrus.Printf("%s does not exist in this tree\n", base) + continue + } + originals[base] = basefile + } else { + basefile = seen + } + for _, link := range links { + linkfile := root.Find(link) + if linkfile == nil { + logrus.Printf("%s does not exist in this tree\n", link) + continue + } + linkfile.Keywords = basefile.Keywords + if countlinks { + linkfile.Keywords = append(linkfile.Keywords, KeyVal(fmt.Sprintf("nlink=%d", len(links)+1))) + } + } + if countlinks { + basefile.Keywords = append(basefile.Keywords, KeyVal(fmt.Sprintf("nlink=%d", len(links)+1))) + } + } +} + +// filter takes in a pointer to an Entry, and returns a slice of Entry's that +// satisfy the predicate p +func filter(root *Entry, p func(*Entry) bool) []Entry { + if root != nil { + var validEntrys []Entry + if len(root.Children) > 0 || root.Prev != nil { + for _, c := range root.Children { + // filter the sub-directory + if c.Prev != nil { + 
validEntrys = append(validEntrys, filter(c, p)...) + } + if p(c) { + if c.Prev == nil { + validEntrys = append([]Entry{*c}, validEntrys...) + } else { + validEntrys = append(validEntrys, *c) + } + } + } + return validEntrys + } + } + return nil +} + +func (ts *tarStream) setErr(err error) { + ts.err = err +} + +func (ts *tarStream) Read(p []byte) (n int, err error) { + return ts.teeReader.Read(p) +} + +func (ts *tarStream) Close() error { + return ts.pipeReader.Close() +} + +// Hierarchy returns the DirectoryHierarchy of the archive. It flattens the +// Entry tree before returning the DirectoryHierarchy +func (ts *tarStream) Hierarchy() (*DirectoryHierarchy, error) { + if ts.err != nil && ts.err != io.EOF { + return nil, ts.err + } + if ts.root == nil { + return nil, fmt.Errorf("root Entry not found, nothing to flatten") + } + resolveHardlinks(ts.root, ts.hardlinks, InKeywordSlice(Keyword("nlink"), ts.keywords)) + flatten(ts.root, &ts.creator, ts.keywords) + return ts.creator.DH, nil +} diff --git a/vendor/github.com/vbatts/go-mtree/update.go b/vendor/github.com/vbatts/go-mtree/update.go new file mode 100644 index 0000000000..5c37a15964 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/update.go @@ -0,0 +1,154 @@ +package mtree + +import ( + "container/heap" + "os" + "sort" + + "github.com/sirupsen/logrus" +) + +// DefaultUpdateKeywords is the default set of keywords that can take updates to the files on disk +var DefaultUpdateKeywords = []Keyword{ + "uid", + "gid", + "mode", + "xattr", + "link", + "time", +} + +// Update attempts to set the attributes of root directory path, given the values of `keywords` in dh DirectoryHierarchy. 
+func Update(root string, dh *DirectoryHierarchy, keywords []Keyword, fs FsEval) ([]InodeDelta, error) { + creator := dhCreator{DH: dh} + curDir, err := os.Getwd() + if err == nil { + defer os.Chdir(curDir) + } + + if err := os.Chdir(root); err != nil { + return nil, err + } + sort.Sort(byPos(creator.DH.Entries)) + + // This is for deferring the update of mtimes of directories, to unwind them + // in a most specific path first + h := &pathUpdateHeap{} + heap.Init(h) + + results := []InodeDelta{} + for i, e := range creator.DH.Entries { + switch e.Type { + case SpecialType: + if e.Name == "/set" { + creator.curSet = &creator.DH.Entries[i] + } else if e.Name == "/unset" { + creator.curSet = nil + } + logrus.Debugf("%#v", e) + continue + case RelativeType, FullType: + e.Set = creator.curSet + pathname, err := e.Path() + if err != nil { + return nil, err + } + + // filter the keywords to update on the file, from the keywords available for this entry: + var kvToUpdate []KeyVal + kvToUpdate = keyvalSelector(e.AllKeys(), keywords) + logrus.Debugf("kvToUpdate(%q): %#v", pathname, kvToUpdate) + + for _, kv := range kvToUpdate { + if !InKeywordSlice(kv.Keyword().Prefix(), keywordPrefixes(keywords)) { + continue + } + logrus.Debugf("finding function for %q (%q)", kv.Keyword(), kv.Keyword().Prefix()) + ukFunc, ok := UpdateKeywordFuncs[kv.Keyword().Prefix()] + if !ok { + logrus.Debugf("no UpdateKeywordFunc for %s; skipping", kv.Keyword()) + continue + } + + // TODO check for the type=dir of the entry as well + if kv.Keyword().Prefix() == "time" && e.IsDir() { + heap.Push(h, pathUpdate{ + Path: pathname, + E: e, + KV: kv, + Func: ukFunc, + }) + + continue + } + + if _, err := ukFunc(pathname, kv); err != nil { + results = append(results, InodeDelta{ + diff: ErrorDifference, + path: pathname, + old: e, + keys: []KeyDelta{ + { + diff: ErrorDifference, + name: kv.Keyword(), + err: err, + }, + }}) + } + // XXX really would be great to have a Check() or Compare() right here, + // to 
compare each entry as it is encountered, rather than just running + // Check() on this path after the whole update is finished. + } + } + } + + for h.Len() > 0 { + pu := heap.Pop(h).(pathUpdate) + if _, err := pu.Func(pu.Path, pu.KV); err != nil { + results = append(results, InodeDelta{ + diff: ErrorDifference, + path: pu.Path, + old: pu.E, + keys: []KeyDelta{ + { + diff: ErrorDifference, + name: pu.KV.Keyword(), + err: err, + }, + }}) + } + } + return results, nil +} + +type pathUpdateHeap []pathUpdate + +func (h pathUpdateHeap) Len() int { return len(h) } +func (h pathUpdateHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// This may end up looking backwards, but for container/heap, Less evaluates +// the negative priority. So when popping members of the array, it will be +// sorted by least. For this use-case, we want the most-qualified-name popped +// first (the longest path name), such that "." is the last entry popped. +func (h pathUpdateHeap) Less(i, j int) bool { + return len(h[i].Path) > len(h[j].Path) +} + +func (h *pathUpdateHeap) Push(x interface{}) { + *h = append(*h, x.(pathUpdate)) +} + +func (h *pathUpdateHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +type pathUpdate struct { + Path string + E Entry + KV KeyVal + Func UpdateKeywordFunc +} diff --git a/vendor/github.com/vbatts/go-mtree/updatefuncs.go b/vendor/github.com/vbatts/go-mtree/updatefuncs.go new file mode 100644 index 0000000000..7bc2462f9e --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/updatefuncs.go @@ -0,0 +1,201 @@ +package mtree + +import ( + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/vbatts/go-mtree/pkg/govis" +) + +// UpdateKeywordFunc is the signature for a function that will restore a file's +// attributes. Where path is relative path to the file, and value to be +// restored to. 
+type UpdateKeywordFunc func(path string, kv KeyVal) (os.FileInfo, error) + +// UpdateKeywordFuncs is the registered list of functions to update file attributes. +// Keyed by the keyword as it would show up in the manifest +var UpdateKeywordFuncs = map[Keyword]UpdateKeywordFunc{ + "mode": modeUpdateKeywordFunc, + "time": timeUpdateKeywordFunc, + "tar_time": tartimeUpdateKeywordFunc, + "uid": uidUpdateKeywordFunc, + "gid": gidUpdateKeywordFunc, + "xattr": xattrUpdateKeywordFunc, + "link": linkUpdateKeywordFunc, +} + +func uidUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) { + uid, err := strconv.Atoi(kv.Value()) + if err != nil { + return nil, err + } + + stat, err := os.Lstat(path) + if err != nil { + return nil, err + } + if statIsUID(stat, uid) { + return stat, nil + } + + if err := os.Lchown(path, uid, -1); err != nil { + return nil, err + } + return os.Lstat(path) +} + +func gidUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) { + gid, err := strconv.Atoi(kv.Value()) + if err != nil { + return nil, err + } + + stat, err := os.Lstat(path) + if err != nil { + return nil, err + } + if statIsGID(stat, gid) { + return stat, nil + } + + if err := os.Lchown(path, -1, gid); err != nil { + return nil, err + } + return os.Lstat(path) +} + +func modeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) { + info, err := os.Lstat(path) + if err != nil { + return nil, err + } + + // don't set mode on symlinks, as it passes through to the backing file + if info.Mode()&os.ModeSymlink != 0 { + return info, nil + } + vmode, err := strconv.ParseInt(kv.Value(), 8, 32) + if err != nil { + return nil, err + } + + stat, err := os.Lstat(path) + if err != nil { + return nil, err + } + if stat.Mode() == os.FileMode(vmode) { + return stat, nil + } + + logrus.Debugf("path: %q, kv.Value(): %q, vmode: %o", path, kv.Value(), vmode) + if err := os.Chmod(path, os.FileMode(vmode)); err != nil { + return nil, err + } + return os.Lstat(path) +} + +// since 
tar_time will only be second level precision, then when restoring the +// filepath from a tar_time, then compare the seconds first and only Chtimes if +// the seconds value is different. +func tartimeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) { + info, err := os.Lstat(path) + if err != nil { + return nil, err + } + + v := strings.SplitN(kv.Value(), ".", 2) + if len(v) != 2 { + return nil, fmt.Errorf("expected a number like 1469104727.000000000") + } + sec, err := strconv.ParseInt(v[0], 10, 64) + if err != nil { + return nil, fmt.Errorf("expected seconds, but got %q", v[0]) + } + + // if the seconds are the same, don't do anything, because the file might + // have nanosecond value, and if using tar_time it would zero it out. + if info.ModTime().Unix() == sec { + return info, nil + } + + vtime := time.Unix(sec, 0) + + // if times are same then don't modify anything + // comparing Unix, since it does not include Nano seconds + if info.ModTime().Unix() == vtime.Unix() { + return info, nil + } + + // symlinks are strange and most of the time passes through to the backing file + if info.Mode()&os.ModeSymlink != 0 { + if err := lchtimes(path, vtime, vtime); err != nil { + return nil, err + } + } else if err := os.Chtimes(path, vtime, vtime); err != nil { + return nil, err + } + return os.Lstat(path) +} + +// this is nano second precision +func timeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) { + info, err := os.Lstat(path) + if err != nil { + return nil, err + } + + v := strings.SplitN(kv.Value(), ".", 2) + if len(v) != 2 { + return nil, fmt.Errorf("expected a number like 1469104727.871937272") + } + nsec, err := strconv.ParseInt(v[0]+v[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("expected nano seconds, but got %q", v[0]+v[1]) + } + logrus.Debugf("arg: %q; nsec: %d", v[0]+v[1], nsec) + + vtime := time.Unix(0, nsec) + + // if times are same then don't modify anything + if info.ModTime().Equal(vtime) { + return info, nil + } 
+ + // symlinks are strange and most of the time passes through to the backing file + if info.Mode()&os.ModeSymlink != 0 { + if err := lchtimes(path, vtime, vtime); err != nil { + return nil, err + } + } else if err := os.Chtimes(path, vtime, vtime); err != nil { + return nil, err + } + return os.Lstat(path) +} + +func linkUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) { + linkname, err := govis.Unvis(kv.Value(), DefaultVisFlags) + if err != nil { + return nil, err + } + got, err := os.Readlink(path) + if err != nil { + return nil, err + } + if got == linkname { + return os.Lstat(path) + } + + logrus.Debugf("linkUpdateKeywordFunc: removing %q to link to %q", path, linkname) + if err := os.Remove(path); err != nil { + return nil, err + } + if err := os.Symlink(linkname, path); err != nil { + return nil, err + } + + return os.Lstat(path) +} diff --git a/vendor/github.com/vbatts/go-mtree/updatefuncs_linux.go b/vendor/github.com/vbatts/go-mtree/updatefuncs_linux.go new file mode 100644 index 0000000000..b7d7e834e4 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/updatefuncs_linux.go @@ -0,0 +1,21 @@ +// +build linux + +package mtree + +import ( + "encoding/base64" + "os" + + "github.com/vbatts/go-mtree/xattr" +) + +func xattrUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) { + buf, err := base64.StdEncoding.DecodeString(kv.Value()) + if err != nil { + return nil, err + } + if err := xattr.Set(path, kv.Keyword().Suffix(), buf); err != nil { + return nil, err + } + return os.Lstat(path) +} diff --git a/vendor/github.com/vbatts/go-mtree/updatefuncs_unsupported.go b/vendor/github.com/vbatts/go-mtree/updatefuncs_unsupported.go new file mode 100644 index 0000000000..9fc70e4be9 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/updatefuncs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package mtree + +import ( + "os" +) + +func xattrUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) { + return os.Lstat(path) +} diff 
--git a/vendor/github.com/vbatts/go-mtree/version.go b/vendor/github.com/vbatts/go-mtree/version.go new file mode 100644 index 0000000000..ba089cb334 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/version.go @@ -0,0 +1,23 @@ +package mtree + +import "fmt" + +const ( + // AppName is the name ... of this library/application + AppName = "gomtree" +) + +const ( + // VersionMajor is for an API incompatible changes + VersionMajor = 0 + // VersionMinor is for functionality in a backwards-compatible manner + VersionMinor = 5 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 0 + + // VersionDev indicates development branch. Releases will be empty string. + VersionDev = "-dev" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/vbatts/go-mtree/walk.go b/vendor/github.com/vbatts/go-mtree/walk.go new file mode 100644 index 0000000000..56b93dc512 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/walk.go @@ -0,0 +1,385 @@ +package mtree + +import ( + "fmt" + "io" + "os" + "os/user" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/vbatts/go-mtree/pkg/govis" +) + +// ExcludeFunc is the type of function called on each path walked to determine +// whether to be excluded from the assembled DirectoryHierarchy. If the func +// returns true, then the path is not included in the spec. 
+type ExcludeFunc func(path string, info os.FileInfo) bool + +// ExcludeNonDirectories is an ExcludeFunc for excluding all paths that are not directories +var ExcludeNonDirectories = func(path string, info os.FileInfo) bool { + return !info.IsDir() +} + +var defaultSetKeyVals = []KeyVal{"type=file", "nlink=1", "flags=none", "mode=0664"} + +// Walk from root directory and assemble the DirectoryHierarchy +// * `excludes` provided are used to skip paths +// * `keywords` are the set to collect from the walked paths. The recommended default list is DefaultKeywords. +// * `fsEval` is the interface to use in evaluating files. If `nil`, then DefaultFsEval is used. +func Walk(root string, excludes []ExcludeFunc, keywords []Keyword, fsEval FsEval) (*DirectoryHierarchy, error) { + if fsEval == nil { + fsEval = DefaultFsEval{} + } + creator := dhCreator{DH: &DirectoryHierarchy{}, fs: fsEval} + // insert signature and metadata comments first (user, machine, tree, date) + for _, e := range signatureEntries(root) { + e.Pos = len(creator.DH.Entries) + creator.DH.Entries = append(creator.DH.Entries, e) + } + // insert keyword metadata next + for _, e := range keywordEntries(keywords) { + e.Pos = len(creator.DH.Entries) + creator.DH.Entries = append(creator.DH.Entries, e) + } + // walk the directory and add entries + err := startWalk(&creator, root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + for _, ex := range excludes { + if ex(path, info) { + return nil + } + } + + entryPathName := filepath.Base(path) + if info.IsDir() { + creator.DH.Entries = append(creator.DH.Entries, Entry{ + Type: BlankType, + Pos: len(creator.DH.Entries), + }) + + // Insert a comment of the full path of the directory's name + if creator.curDir != nil { + dirname, err := creator.curDir.Path() + if err != nil { + return err + } + creator.DH.Entries = append(creator.DH.Entries, Entry{ + Pos: len(creator.DH.Entries), + Raw: "# " + filepath.Join(dirname, 
entryPathName), + Type: CommentType, + }) + } else { + entryPathName = "." + creator.DH.Entries = append(creator.DH.Entries, Entry{ + Pos: len(creator.DH.Entries), + Raw: "# .", + Type: CommentType, + }) + } + + // set the initial /set keywords + if creator.curSet == nil { + e := Entry{ + Name: "/set", + Type: SpecialType, + Pos: len(creator.DH.Entries), + Keywords: keyvalSelector(defaultSetKeyVals, keywords), + } + for _, keyword := range SetKeywords { + err := func() error { + var r io.Reader + if info.Mode().IsRegular() { + fh, err := creator.fs.Open(path) + if err != nil { + return err + } + defer fh.Close() + r = fh + } + keyFunc, ok := KeywordFuncs[keyword.Prefix()] + if !ok { + return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path) + } + kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r) + if err != nil { + return err + } + for _, kv := range kvs { + if kv != "" { + e.Keywords = append(e.Keywords, kv) + } + } + return nil + }() + if err != nil { + return err + } + } + creator.curSet = &e + creator.DH.Entries = append(creator.DH.Entries, e) + } else if creator.curSet != nil { + // check the attributes of the /set keywords and re-set if changed + klist := []KeyVal{} + for _, keyword := range SetKeywords { + err := func() error { + var r io.Reader + if info.Mode().IsRegular() { + fh, err := creator.fs.Open(path) + if err != nil { + return err + } + defer fh.Close() + r = fh + } + keyFunc, ok := KeywordFuncs[keyword.Prefix()] + if !ok { + return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path) + } + kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r) + if err != nil { + return err + } + for _, kv := range kvs { + if kv != "" { + klist = append(klist, kv) + } + } + return nil + }() + if err != nil { + return err + } + } + + needNewSet := false + for _, k := range klist { + if !inKeyValSlice(k, creator.curSet.Keywords) { + needNewSet = true + } + } + if needNewSet { + e := Entry{ + Name: "/set", + Type: 
SpecialType, + Pos: len(creator.DH.Entries), + Keywords: keyvalSelector(append(defaultSetKeyVals, klist...), keywords), + } + creator.curSet = &e + creator.DH.Entries = append(creator.DH.Entries, e) + } + } + } + encodedEntryName, err := govis.Vis(entryPathName, DefaultVisFlags) + if err != nil { + return err + } + e := Entry{ + Name: encodedEntryName, + Pos: len(creator.DH.Entries), + Type: RelativeType, + Set: creator.curSet, + Parent: creator.curDir, + } + for _, keyword := range keywords { + err := func() error { + var r io.Reader + if info.Mode().IsRegular() { + fh, err := creator.fs.Open(path) + if err != nil { + return err + } + defer fh.Close() + r = fh + } + keyFunc, ok := KeywordFuncs[keyword.Prefix()] + if !ok { + return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path) + } + kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r) + if err != nil { + return err + } + for _, kv := range kvs { + if kv != "" && !inKeyValSlice(kv, creator.curSet.Keywords) { + e.Keywords = append(e.Keywords, kv) + } + } + return nil + }() + if err != nil { + return err + } + } + if info.IsDir() { + if creator.curDir != nil { + creator.curDir.Next = &e + } + e.Prev = creator.curDir + creator.curDir = &e + } else { + if creator.curEnt != nil { + creator.curEnt.Next = &e + } + e.Prev = creator.curEnt + creator.curEnt = &e + } + creator.DH.Entries = append(creator.DH.Entries, e) + return nil + }) + return creator.DH, err +} + +// startWalk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
+func startWalk(c *dhCreator, root string, walkFn filepath.WalkFunc) error { + info, err := c.fs.Lstat(root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(c, root, info, walkFn) +} + +// walk recursively descends path, calling w. +func walk(c *dhCreator, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readOrderedDirNames(c, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := c.fs.Lstat(filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(c, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + c.DH.Entries = append(c.DH.Entries, Entry{ + Name: "..", + Type: DotDotType, + Pos: len(c.DH.Entries), + }) + if c.curDir != nil { + c.curDir = c.curDir.Parent + } + return nil +} + +// readOrderedDirNames reads the directory and returns a sorted list of all +// entries with non-directories first, followed by directories. +func readOrderedDirNames(c *dhCreator, dirname string) ([]string, error) { + infos, err := c.fs.Readdir(dirname) + if err != nil { + return nil, err + } + + names := []string{} + dirnames := []string{} + for _, info := range infos { + if info.IsDir() { + dirnames = append(dirnames, info.Name()) + continue + } + names = append(names, info.Name()) + } + sort.Strings(names) + sort.Strings(dirnames) + return append(names, dirnames...), nil +} + +// signatureEntries is a simple helper function that returns a slice of Entry's +// that describe the metadata signature about the host. 
Items like date, user, +// machine, and tree (which is specified by argument `root`), are considered. +// These Entry's construct comments in the mtree specification, so if there is +// an error trying to obtain a particular metadata, we simply don't construct +// the Entry. +func signatureEntries(root string) []Entry { + var sigEntries []Entry + user, err := user.Current() + if err == nil { + userEntry := Entry{ + Type: CommentType, + Raw: fmt.Sprintf("#%16s%s", "user: ", user.Username), + } + sigEntries = append(sigEntries, userEntry) + } + + hostname, err := os.Hostname() + if err == nil { + hostEntry := Entry{ + Type: CommentType, + Raw: fmt.Sprintf("#%16s%s", "machine: ", hostname), + } + sigEntries = append(sigEntries, hostEntry) + } + + if tree := filepath.Clean(root); tree == "." || tree == ".." { + root, err := os.Getwd() + if err == nil { + // use parent directory of current directory + if tree == ".." { + root = filepath.Dir(root) + } + treeEntry := Entry{ + Type: CommentType, + Raw: fmt.Sprintf("#%16s%s", "tree: ", filepath.Clean(root)), + } + sigEntries = append(sigEntries, treeEntry) + } + } else { + treeEntry := Entry{ + Type: CommentType, + Raw: fmt.Sprintf("#%16s%s", "tree: ", filepath.Clean(root)), + } + sigEntries = append(sigEntries, treeEntry) + } + + dateEntry := Entry{ + Type: CommentType, + Raw: fmt.Sprintf("#%16s%s", "date: ", time.Now().Format("Mon Jan 2 15:04:05 2006")), + } + sigEntries = append(sigEntries, dateEntry) + + return sigEntries +} + +// keywordEntries returns a slice of entries including a comment of the +// keywords requested when generating this manifest. +func keywordEntries(keywords []Keyword) []Entry { + // Convert all of the keywords to zero-value keyvals. 
+ return []Entry{ + { + Type: CommentType, + Raw: fmt.Sprintf("#%16s%s", "keywords: ", strings.Join(FromKeywords(keywords), ",")), + }, + } +} diff --git a/vendor/github.com/vbatts/go-mtree/xattr/xattr.go b/vendor/github.com/vbatts/go-mtree/xattr/xattr.go new file mode 100644 index 0000000000..d6ad9cedca --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/xattr/xattr.go @@ -0,0 +1,42 @@ +// +build linux + +package xattr + +import ( + "strings" + "syscall" +) + +// Get returns the extended attributes (xattr) on file `path`, for the given `name`. +func Get(path, name string) ([]byte, error) { + dest := make([]byte, 1024) + i, err := syscall.Getxattr(path, name, dest) + if err != nil { + return nil, err + } + return dest[:i], nil +} + +// Set sets the extended attributes (xattr) on file `path`, for the given `name` and `value` +func Set(path, name string, value []byte) error { + return syscall.Setxattr(path, name, value, 0) +} + +// List returns a list of all the extended attributes (xattr) for file `path` +func List(path string) ([]string, error) { + dest := make([]byte, 1024) + i, err := syscall.Listxattr(path, dest) + if err != nil { + return nil, err + } + + // If the returned list is empty, return nil instead of []string{""} + str := string(dest[:i]) + if str == "" { + return nil, nil + } + + return strings.Split(strings.TrimRight(str, nilByte), nilByte), nil +} + +const nilByte = "\x00" diff --git a/vendor/github.com/vbatts/go-mtree/xattr/xattr_unsupported.go b/vendor/github.com/vbatts/go-mtree/xattr/xattr_unsupported.go new file mode 100644 index 0000000000..2ceda58e18 --- /dev/null +++ b/vendor/github.com/vbatts/go-mtree/xattr/xattr_unsupported.go @@ -0,0 +1,21 @@ +// +build !linux + +package xattr + +// Get would return the extended attributes, but this unsupported feature +// returns nil, nil +func Get(path, name string) ([]byte, error) { + return nil, nil +} + +// Set would set the extended attributes, but this unsupported feature returns +// nil +func 
Set(path, name string, value []byte) error { + return nil +} + +// List would return the keys of extended attributes, but this unsupported +// feature returns nil, nil +func List(path string) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go index 63786d5f1e..2f1e7df36c 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/validation.go +++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go @@ -747,9 +747,7 @@ func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key str subContext := NewJsonContext(key, context) validationResult := pv.subValidateWithContext(value, subContext) result.mergeErrors(validationResult) - if validationResult.Valid() { - validatedkey = true - } + validatedkey = true } } diff --git a/vendor/go4.org/AUTHORS b/vendor/go4.org/AUTHORS deleted file mode 100644 index d1ad485f52..0000000000 --- a/vendor/go4.org/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of go4 authors for copyright purposes. -# This is distinct from the CONTRIBUTORS file, which is the list of -# people who have contributed, even if they don't own the copyright on -# their work. - -Mathieu Lonjaret -Daniel Theophanes -Google diff --git a/vendor/go4.org/errorutil/highlight.go b/vendor/go4.org/errorutil/highlight.go deleted file mode 100644 index 1b1efb0f68..0000000000 --- a/vendor/go4.org/errorutil/highlight.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2011 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package errorutil helps make better error messages. -package errorutil // import "go4.org/errorutil" - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -// HighlightBytePosition takes a reader and the location in bytes of a parse -// error (for instance, from json.SyntaxError.Offset) and returns the line, column, -// and pretty-printed context around the error with an arrow indicating the exact -// position of the syntax error. -func HighlightBytePosition(f io.Reader, pos int64) (line, col int, highlight string) { - line = 1 - br := bufio.NewReader(f) - lastLine := "" - thisLine := new(bytes.Buffer) - for n := int64(0); n < pos; n++ { - b, err := br.ReadByte() - if err != nil { - break - } - if b == '\n' { - lastLine = thisLine.String() - thisLine.Reset() - line++ - col = 1 - } else { - col++ - thisLine.WriteByte(b) - } - } - if line > 1 { - highlight += fmt.Sprintf("%5d: %s\n", line-1, lastLine) - } - highlight += fmt.Sprintf("%5d: %s\n", line, thisLine.String()) - highlight += fmt.Sprintf("%s^\n", strings.Repeat(" ", col+5)) - return -} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go new file mode 100644 index 0000000000..6c6e84236a --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go @@ -0,0 +1,120 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ripemd160 implements the RIPEMD-160 hash algorithm. +package ripemd160 // import "golang.org/x/crypto/ripemd160" + +// RIPEMD-160 is designed by by Hans Dobbertin, Antoon Bosselaers, and Bart +// Preneel with specifications available at: +// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf. 
+ +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.RIPEMD160, New) +} + +// The size of the checksum in bytes. +const Size = 20 + +// The block size of the hash algorithm in bytes. +const BlockSize = 64 + +const ( + _s0 = 0x67452301 + _s1 = 0xefcdab89 + _s2 = 0x98badcfe + _s3 = 0x10325476 + _s4 = 0xc3d2e1f0 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [5]uint32 // running context + x [BlockSize]byte // temporary buffer + nx int // index into x + tc uint64 // total count of bytes processed +} + +func (d *digest) Reset() { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4 + d.nx = 0 + d.tc = 0 +} + +// New returns a new hash.Hash computing the checksum. +func New() hash.Hash { + result := new(digest) + result.Reset() + return result +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.tc += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > BlockSize-d.nx { + n = BlockSize - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == BlockSize { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + tc := d.tc + var tmp [64]byte + tmp[0] = 0x80 + if tc%64 < 56 { + d.Write(tmp[0 : 56-tc%64]) + } else { + d.Write(tmp[0 : 64+56-tc%64]) + } + + // Length in bits. 
+ tc <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(tc >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + var digest [Size]byte + for i, s := range d.s { + digest[i*4] = byte(s) + digest[i*4+1] = byte(s >> 8) + digest[i*4+2] = byte(s >> 16) + digest[i*4+3] = byte(s >> 24) + } + + return append(in, digest[:]...) +} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go new file mode 100644 index 0000000000..e0edc02f0f --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go @@ -0,0 +1,165 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RIPEMD-160 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package ripemd160 + +import ( + "math/bits" +) + +// work buffer indices and roll amounts for one line +var _n = [80]uint{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8, + 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12, + 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2, + 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13, +} + +var _r = [80]uint{ + 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8, + 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12, + 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5, + 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12, + 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6, +} + +// same for the other parallel one +var n_ = [80]uint{ + 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12, + 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2, + 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13, + 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14, + 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11, +} + +var r_ = [80]uint{ + 8, 
9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6, + 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11, + 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5, + 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, + 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11, +} + +func _Block(md *digest, p []byte) int { + n := 0 + var x [16]uint32 + var alpha, beta uint32 + for len(p) >= BlockSize { + a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4] + aa, bb, cc, dd, ee := a, b, c, d, e + j := 0 + for i := 0; i < 16; i++ { + x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // round 1 + i := 0 + for i < 16 { + alpha = a + (b ^ c ^ d) + x[_n[i]] + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 2 + for i < 32 { + alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 3 + for i < 48 { + alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 4 + for i 
< 64 { + alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 5 + for i < 80 { + alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // combine results + dd += c + md.s[1] + md.s[1] = md.s[2] + d + ee + md.s[2] = md.s[3] + e + aa + md.s[3] = md.s[4] + a + bb + md.s[4] = md.s[0] + b + cc + md.s[0] = dd + + p = p[BlockSize:] + n += BlockSize + } + return n +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 1922b25bc0..8eb6f76c0e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -24,6 +24,8 @@ github.com/Microsoft/hcsshim/internal/safefile github.com/Netflix/go-expect # github.com/alexflint/go-filemutex v0.0.0-20171028004239-d358565f3c3f github.com/alexflint/go-filemutex +# github.com/apex/log v1.1.0 +github.com/apex/log # github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.1+incompatible @@ -120,6 +122,8 @@ github.com/coreos/go-systemd/dbus github.com/coreos/go-systemd/activation # github.com/cpuguy83/go-md2man v1.0.10 github.com/cpuguy83/go-md2man/md2man +# github.com/cyphar/filepath-securejoin v0.2.2 +github.com/cyphar/filepath-securejoin # github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c github.com/d2g/dhcp4 # github.com/d2g/dhcp4client 
v0.0.0-20180611075603-e61299896203 @@ -204,11 +208,11 @@ github.com/gorilla/context github.com/gorilla/mux # github.com/gorilla/websocket v1.4.0 github.com/gorilla/websocket -# github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce +# github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 github.com/hashicorp/errwrap # github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-cleanhttp -# github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874 +# github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 github.com/hashicorp/go-multierror # github.com/hashicorp/go-retryablehttp v0.5.3 github.com/hashicorp/go-retryablehttp @@ -218,6 +222,14 @@ github.com/inconshreveable/mousetrap github.com/j-keck/arping # github.com/juju/errors v0.0.0-20190207033735-e65537c515d7 github.com/juju/errors +# github.com/klauspost/compress v1.4.1 +github.com/klauspost/compress/flate +# github.com/klauspost/cpuid v1.2.0 +github.com/klauspost/cpuid +# github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 +github.com/klauspost/crc32 +# github.com/klauspost/pgzip v0.0.0-20170402124221-0bf5dcad4ada +github.com/klauspost/pgzip # github.com/kr/pty v1.1.3 github.com/kr/pty # github.com/kubernetes-sigs/cri-o v0.0.0-20180917213123-8afc34092907 @@ -234,20 +246,33 @@ github.com/mattn/go-shellwords github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c github.com/mtrmac/gpgme +# github.com/openSUSE/umoci v0.4.4 +github.com/openSUSE/umoci +github.com/openSUSE/umoci/oci/layer +github.com/openSUSE/umoci/oci/cas/dir +github.com/openSUSE/umoci/oci/casext +github.com/openSUSE/umoci/oci/config/generate +github.com/openSUSE/umoci/pkg/idtools +github.com/openSUSE/umoci/oci/cas +github.com/openSUSE/umoci/oci/config/convert +github.com/openSUSE/umoci/pkg/fseval +github.com/openSUSE/umoci/pkg/system +github.com/openSUSE/umoci/pkg/testutils +github.com/openSUSE/umoci/pkg/unpriv 
+github.com/openSUSE/umoci/third_party/shared +github.com/openSUSE/umoci/pkg/hardening +github.com/openSUSE/umoci/third_party/user # github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v0.0.0-20180411145040-e562b0440392 +# github.com/opencontainers/image-spec v1.0.0 => github.com/opencontainers/image-spec v0.0.0-20180411145040-e562b0440392 github.com/opencontainers/image-spec/specs-go/v1 github.com/opencontainers/image-spec/specs-go -github.com/opencontainers/image-spec/schema -# github.com/opencontainers/image-tools v0.0.0-20180129025323-c95f76cbae74 => github.com/sylabs/image-tools v0.0.0-20181006203805-2814f4980568 -github.com/opencontainers/image-tools/image # github.com/opencontainers/runc v0.1.1 github.com/opencontainers/runc/libcontainer/system github.com/opencontainers/runc/libcontainer/user -# github.com/opencontainers/runtime-spec v0.0.0-20180913141938-5806c3563733 +# github.com/opencontainers/runtime-spec v1.0.0 => github.com/opencontainers/runtime-spec v0.0.0-20180913141938-5806c3563733 github.com/opencontainers/runtime-spec/specs-go -# github.com/opencontainers/runtime-tools v0.6.0 +# github.com/opencontainers/runtime-tools v0.7.0 github.com/opencontainers/runtime-tools/generate github.com/opencontainers/runtime-tools/generate/seccomp github.com/opencontainers/runtime-tools/validate @@ -277,6 +302,8 @@ github.com/prometheus/procfs github.com/prometheus/procfs/nfs github.com/prometheus/procfs/xfs github.com/prometheus/procfs/internal/util +# github.com/rootless-containers/proto v0.1.0 +github.com/rootless-containers/proto/go-proto # github.com/russross/blackfriday v1.5.2 github.com/russross/blackfriday # github.com/safchain/ethtool v0.0.0-20180504150752-6e3f4faa84e1 @@ -285,7 +312,7 @@ github.com/safchain/ethtool github.com/satori/go.uuid # github.com/seccomp/libseccomp-golang v0.9.0 github.com/seccomp/libseccomp-golang -# github.com/sirupsen/logrus v1.0.5 +# 
github.com/sirupsen/logrus v1.0.6 github.com/sirupsen/logrus # github.com/spf13/cobra v0.0.0-20190321000552-67fc4837d267 github.com/spf13/cobra @@ -306,6 +333,12 @@ github.com/sylabs/sif/pkg/sif github.com/sylabs/sif/internal/app/siftool # github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e github.com/syndtr/gocapability/capability +# github.com/urfave/cli v1.20.0 +github.com/urfave/cli +# github.com/vbatts/go-mtree v0.4.3 +github.com/vbatts/go-mtree +github.com/vbatts/go-mtree/pkg/govis +github.com/vbatts/go-mtree/xattr # github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl @@ -315,10 +348,8 @@ github.com/vishvananda/netns github.com/xeipuuv/gojsonpointer # github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 github.com/xeipuuv/gojsonreference -# github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f +# github.com/xeipuuv/gojsonschema v0.0.0-20180719132039-b84684d0e066 github.com/xeipuuv/gojsonschema -# go4.org v0.0.0-20180417224846-9599cf28b011 -go4.org/errorutil # golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 => github.com/sylabs/golang-x-crypto v0.0.0-20181006204705-4bce89e8e9a9 golang.org/x/crypto/ssh/terminal golang.org/x/crypto/openpgp @@ -329,10 +360,11 @@ golang.org/x/crypto/openpgp/errors golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/cast5 golang.org/x/crypto/openpgp/elgamal +golang.org/x/crypto/ripemd160 # golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/net/context/ctxhttp -golang.org/x/net/proxy golang.org/x/net/context +golang.org/x/net/proxy golang.org/x/net/internal/socks # golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sync/errgroup From 79d975dd60eae3e62c5fd5486e18a0aa2c405146 Mon Sep 17 00:00:00 2001 From: Adam Simpson Date: Thu, 11 Jul 2019 16:29:59 -0700 Subject: [PATCH 2/7] uniform error messages --- internal/pkg/build/sources/conveyorPacker_oci.go | 12 ++++++------ 1 file 
changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/pkg/build/sources/conveyorPacker_oci.go b/internal/pkg/build/sources/conveyorPacker_oci.go index a9e1bde2fe..17c60646b4 100644 --- a/internal/pkg/build/sources/conveyorPacker_oci.go +++ b/internal/pkg/build/sources/conveyorPacker_oci.go @@ -299,33 +299,33 @@ func (cp *OCIConveyorPacker) unpackTmpfs() (err error) { uidMap, err := idtools.ParseMapping(fmt.Sprintf("0:%d:1", os.Geteuid())) if err != nil { - return fmt.Errorf("failure parsing uidmap: %s", err) + return fmt.Errorf("error parsing uidmap: %s", err) } mapOptions.UIDMappings = append(mapOptions.UIDMappings, uidMap) gidMap, err := idtools.ParseMapping(fmt.Sprintf("0:%d:1", os.Getegid())) if err != nil { - return fmt.Errorf("failure parsing gidmap: %s", err) + return fmt.Errorf("error parsing gidmap: %s", err) } mapOptions.GIDMappings = append(mapOptions.GIDMappings, gidMap) } engineExt, err := umoci.OpenLayout(cp.b.Path) if err != nil { - return fmt.Errorf("Failed to open layout: %s", err) + return fmt.Errorf("error opening layout: %s", err) } // Obtain the manifest imageSource, err := cp.tmpfsRef.NewImageSource(context.Background(), cp.sysCtx) if err != nil { - return fmt.Errorf("Create image source: %s", err) + return fmt.Errorf("error creating image source: %s", err) } manifestData, mediaType, err := imageSource.GetManifest(context.Background(), nil) if err != nil { - return fmt.Errorf("Obtain manifest source: %s", err) + return fmt.Errorf("error obtaining manifest source: %s", err) } if mediaType != imgspecv1.MediaTypeImageManifest { - return fmt.Errorf("Manifest has invalid MIMEtype: %s", mediaType) + return fmt.Errorf("error verifying manifest media type: %s", mediaType) } var manifest imgspecv1.Manifest json.Unmarshal(manifestData, &manifest) From ebacae0515fd291f73b39b0816b62ed0f07eb408 Mon Sep 17 00:00:00 2001 From: Adam Simpson Date: Thu, 11 Jul 2019 20:09:31 -0700 Subject: [PATCH 3/7] freshen up vendor --- go.sum | 4 ---- 
vendor/modules.txt | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/go.sum b/go.sum index e2b3515d26..a576938599 100644 --- a/go.sum +++ b/go.sum @@ -191,14 +191,10 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v0.0.0-20180411145040-e562b0440392 h1:rBwY4zl6Rvzh0RyFbELnswKxVfiq7xB/d2sfgy3PmHI= github.com/opencontainers/image-spec v0.0.0-20180411145040-e562b0440392/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.0 h1:jcw3cCH887bLKETGYpv8afogdYchbShR0eH6oD9d5PQ= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v0.0.0-20180913141938-5806c3563733 h1:3g+PhOUU7d+gimwdmJU++EIKFzWUASEviZHhpOt/Zvw= github.com/opencontainers/runtime-spec v0.0.0-20180913141938-5806c3563733/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg= -github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.7.0 h1:MIjqgwi4ZC+eVNGiYotCUYuTfs/oWDEcigK9Ra5ruHU= github.com/opencontainers/runtime-tools v0.7.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.0.0-rc1 h1:Q70KvmpJSrYzryl/d0tC3vWUiTn23cSdStKodlokEPs= diff --git a/vendor/modules.txt b/vendor/modules.txt index 8eb6f76c0e..bc8c813eec 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -249,10 +249,10 @@ github.com/mtrmac/gpgme # github.com/openSUSE/umoci v0.4.4 github.com/openSUSE/umoci 
github.com/openSUSE/umoci/oci/layer +github.com/openSUSE/umoci/pkg/idtools github.com/openSUSE/umoci/oci/cas/dir github.com/openSUSE/umoci/oci/casext github.com/openSUSE/umoci/oci/config/generate -github.com/openSUSE/umoci/pkg/idtools github.com/openSUSE/umoci/oci/cas github.com/openSUSE/umoci/oci/config/convert github.com/openSUSE/umoci/pkg/fseval From 88ddaa5c55c345c4f7dc67032d4b4fe41bd01e74 Mon Sep 17 00:00:00 2001 From: Adam Simpson Date: Thu, 11 Jul 2019 21:22:36 -0700 Subject: [PATCH 4/7] update vendor --- go.sum | 57 ++----------------- vendor/github.com/openSUSE/umoci/packaging | 1 + .../proto/go-proto/rootlesscontainers.proto | 1 + 3 files changed, 8 insertions(+), 51 deletions(-) create mode 100644 vendor/github.com/openSUSE/umoci/packaging create mode 100644 vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto diff --git a/go.sum b/go.sum index a576938599..90295fe367 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,4 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.4.7 h1:vOvDiY/F1avSWlCWiKJjdYKz2jVjTK3pWPHndeG4OAY= github.com/Microsoft/go-winio v0.4.7/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= @@ -14,19 +13,14 @@ github.com/apex/log v1.1.0/go.mod h1:yA770aXIDQrhVOIGurT/pVdfCpSq1GQV/auzMN5fzvY github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bitly/go-simplejson v0.5.0/go.mod 
h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 h1:D21IyuvjDCshj1/qq+pCNd3VZOAEI9jy6Bi131YlXgI= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bugsnag/bugsnag-go v1.5.1 h1:NnfkWPiRGJlUg6s5mRlsbudWcW/B/eGFSad98JxitaU= github.com/bugsnag/bugsnag-go v1.5.1/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.2.0 h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA= github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/cgroups v0.0.0-20181208203134-65ce98b3dfeb h1:NbTtlERiUsSc6Mbe02Cobgn7tjaSaF6okRFW7FCT0JY= @@ -39,7 +33,7 @@ github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.1 h1:dJbykiiSIS3Xvo8d+A6rSXcUEFGfvCjUA+bUED4qegQ= github.com/containernetworking/plugins v0.8.1/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU= -github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba h1:gSB68H7q2WphSW/Qv2b8/vquMvB6Hx0I7KkYO6pulsQ= +github.com/containers/image 
v0.0.0-20180612162315-2e4f799f5eba h1:kUvFz4PWuB3DGy730A29+433HSykc53uvAKhWmoCPXU= github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= github.com/containers/storage v0.0.0-20180604200230-88d80428f9b1 h1:XxYkhkQbl5U+c930nkoJM46gy4xn/UbZlpZ/2kkAKWo= github.com/containers/storage v0.0.0-20180604200230-88d80428f9b1/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM= @@ -58,17 +52,14 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c h1:Xo2rK1pzOm0jO6abTPIQw github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v0.0.0-20180611075603-e61299896203 h1:2/TaU1mJO2o4BTLnqz6KxJxe7ektbzoU11yqa8k6N9Y= github.com/d2g/dhcp4client v0.0.0-20180611075603-e61299896203/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5 h1:+CpLbZIeUn94m02LdEKPcgErLJ347NUwxPKs5u8ieiY= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4 h1:itqmmf1PFpC4n5JW+j4BU7X4MTfVurhYRTjODoPb2Y8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deislabs/oras v0.4.0 h1:G/wxhm9Simecxlr7dsjWy5iL0UGPAQlnuFNRZsXVJfI= github.com/deislabs/oras v0.4.0/go.mod h1:SXwPnImOu69FofPWaqgB+cPKKQRBmao5i+9xQRdcOiM= github.com/docker/distribution v0.0.0-20180611183926-749f6afb4572 h1:i5cGFDf/s0ocGJXaJHJR+NiDgMb5oLoUNpGlRZqurlY= github.com/docker/distribution v0.0.0-20180611183926-749f6afb4572/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 
h1:p8hSDXZgVhyh/C9bPlG8QMY64VeXtVfjmjIlzaQok5Q= +github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 h1:UIlbTLoIplW1xQDT+7yqI8ihfHxg73S5vyUsScDWTZc= github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.0 h1:5bhDRLn1roGiNjz8IezRngHxMfoeaXGyr0BeMHq4rD8= github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -83,9 +74,7 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc= github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -93,7 +82,6 @@ github.com/go-log/log v0.1.0 h1:wudGTNsiGzrD5ZjgIkVZ517ugi2XRe9Q/xRCzwEO4/U= github.com/go-log/log v0.1.0/go.mod h1:4mBwpdRMFLiuXZDCwU2lKQFsoSCo72j3HqBK9d81N2M= github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4= github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= 
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= @@ -106,13 +94,11 @@ github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.4.0 h1:XulKRWSQK5uChr4pEgSE4Tc/OcmnU9GJuSwdog/tZsA= github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 h1:Rem2+U35z1QtPQc6r+WolF7yXiefXqDKyk+lN2pE164= github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -124,9 +110,7 @@ github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1: github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/imdario/mergo v0.3.7 
h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -134,11 +118,8 @@ github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56 h1:742eGXur0715JMq73 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/juju/errors v0.0.0-20190207033735-e65537c515d7 h1:dMIPRDg6gi7CUp0Kj2+HxqJ5kTr1iAdzsXYIrLCNSmU= github.com/juju/errors v0.0.0-20190207033735-e65537c515d7/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= -github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= -github.com/juju/testing v0.0.0-20190613124551-e81189438503 h1:ZUgTbk8oHgP0jpMieifGC9Lv47mHn8Pb3mFX3/Ew4iY= github.com/juju/testing v0.0.0-20190613124551-e81189438503/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= @@ -150,17 +131,14 @@ github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRi github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v0.0.0-20170402124221-0bf5dcad4ada h1:ZHhgRyr+9LYwfuWChpSTCCe/07V26LEElTKUXj+2fAg= github.com/klauspost/pgzip 
v0.0.0-20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3 h1:/Um6a/ZmD5tF7peoOJ5oN5KMQ0DrGVQSXLNwyckutPk= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kubernetes-sigs/cri-o v0.0.0-20180917213123-8afc34092907 h1:vYZ9WIuOKL8VB+Foaspns++EJF+5qF17aZNiC7NAh98= github.com/kubernetes-sigs/cri-o v0.0.0-20180917213123-8afc34092907/go.mod h1:OU8KMZFn54AzPFUlmy3DiEnN/EkdTJDWVUy+gl3z8zE= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= @@ -176,16 +154,13 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb h1:e+l77LJOEqXTIQihQJVkA6ZxPOUmfPM5e4H7rcpgtSk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mtrmac/gpgme 
v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openSUSE/umoci v0.4.4 h1:CNwlje61gxLf8Yg8wvfFFURN2a9UtczO+6HQS9x+r+s= +github.com/openSUSE/umoci v0.4.4 h1:pTM5xAnCpYfWugNp8ZL1e83NiwDxFopqwU3RVSU6l9Y= github.com/openSUSE/umoci v0.4.4/go.mod h1:WC0knmZfXsEOZyJUv/k3zDOCzjyaFTGMvooGTe4NzIw= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -201,11 +176,9 @@ github.com/opencontainers/selinux v1.0.0-rc1 h1:Q70KvmpJSrYzryl/d0tC3vWUiTn23cSd github.com/opencontainers/selinux v1.0.0-rc1/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 h1:AUK/hm/tPsiNNASdb3J8fySVRZoI7fnK5mlOvdFD43o= github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= @@ -217,7 +190,7 @@ github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be h1:MoyXp/VjXUwM0GyDcdwT7Ubea2gxOSHpPaFo3qV+Y2A= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rootless-containers/proto v0.1.0 h1:gS1JOMEtk1YDYHCzBAf/url+olMJbac7MTrgSeP6zh4= +github.com/rootless-containers/proto v0.1.0 h1:+VUNCatWCjNzgrheqcy9qN/VThibH8/cGHQG9UJy2ag= github.com/rootless-containers/proto v0.1.0/go.mod h1:vgkUFZbQd0gcE/K/ZwtE4MYjZPu0UNHLXIQxhyqAFh8= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -237,9 +210,7 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stevvooe/resumable v0.0.0-20180830230917-22b14a53ba50 h1:4bT0pPowCpQImewr+BjzfUKcuFW+KVyB8d1OF3b6oTI= github.com/stevvooe/resumable v0.0.0-20180830230917-22b14a53ba50/go.mod h1:1pdIZTAHUz+HDKDVZ++5xg/duPlhKAIzw9qy42CWYp4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/sylabs/golang-x-crypto 
v0.0.0-20181006204705-4bce89e8e9a9 h1:OjtuUh4ZvQpHdwDHOgi8LM0skj8imSc2Hz6966oGxKY= github.com/sylabs/golang-x-crypto v0.0.0-20181006204705-4bce89e8e9a9/go.mod h1:Qf7xZmhvuwq9Hq4LdNLS4xabRQkPJSvEP3Bh4UFG0v4= @@ -258,7 +229,7 @@ github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vbatts/go-mtree v0.4.3 h1:IC2s9EpogK3QzU+VsfuEdM7POkwnW43XDGAWO2Rb1Bo= +github.com/vbatts/go-mtree v0.4.3 h1:U57BeTKpgmNcMu7lRJHzx6GHsstjGT7A9+iqviOuvtQ= github.com/vbatts/go-mtree v0.4.3/go.mod h1:3sazBqLG4bZYmgRTgdh9X3iKTzwBpp5CrREJDzrNSXY= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6 h1:YqlaLDyh/+jUHgh83iNy8KiCvD4LeqnSS5U5a9cgYqU= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= @@ -270,18 +241,13 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180719132039-b84684d0e066 h1:iBmpEMJZETMKCupjL9Q7X3Q5utIRnWGbls0TXTgD7JI= github.com/xeipuuv/gojsonschema v0.0.0-20180719132039-b84684d0e066/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xenolf/lego v2.5.0+incompatible h1:vjkBSqBww+pMeQgH/VjbVZPP+qccOmNE82TgC4CO8cI= github.com/xenolf/lego v2.5.0+incompatible/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 h1:p7OofyZ509h8DmPLh8Hn+EIIZm/xYhdZHJ9GnXHdr6U= 
github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.6 h1:qMJQYPNdtJ7UNYHjX38KXZtltKTqimMuoQjNnSVIuJg= github.com/yvasiyarov/gorelic v0.0.6/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 h1:AsFN8kXcCVkUFHyuzp1FtYbzp1nCO/H6+1uPSGEyPzM= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180801234040-f4c29de78a2a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -291,14 +257,11 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180801221139-3dc4335d56c7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 
h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -307,28 +270,20 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/grpc v1.20.0 h1:DlsSIrgEBuZAUFJcta2B5i/lzeHHbnfkNFAfFXLVFYQ= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25 h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 
h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.3.0+incompatible h1:oTCKjb7ZuVfn37AQodk7UysjQQL/S5Dep3pzi59u1NQ= +gotest.tools v2.3.0+incompatible h1:zANm08BR3aLMhyvHi+AxtY5IhnqJOnWsNhtRkKkZhmM= gotest.tools v2.3.0+incompatible/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/client-go v0.0.0-20181010045704-56e7a63b5e38 h1:KirVQhD3RM/NNQUJeinP5Bq4He0bv2RopF2RFxrC7Ck= k8s.io/client-go v0.0.0-20181010045704-56e7a63b5e38/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -rsc.io/letsencrypt v0.0.1 h1:DV0d09Ne9E7UUa9ZqWktZ9L2VmybgTgfq7xlfFR/bbU= rsc.io/letsencrypt v0.0.1/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= diff --git a/vendor/github.com/openSUSE/umoci/packaging b/vendor/github.com/openSUSE/umoci/packaging new file mode 100644 index 0000000000..dd2596cf07 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/packaging @@ -0,0 +1 @@ +contrib/pkg \ No newline at end of file diff --git a/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto new file mode 100644 index 
0000000000..448e35d835 --- /dev/null +++ b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto @@ -0,0 +1 @@ +../rootlesscontainers.proto \ No newline at end of file From cef3431d3898f1f618c785ea72b72a7064d39c4e Mon Sep 17 00:00:00 2001 From: Adam Simpson Date: Thu, 11 Jul 2019 21:32:15 -0700 Subject: [PATCH 5/7] update vendor --- go.sum | 12 ++++++------ vendor/github.com/openSUSE/umoci/packaging | 1 - .../proto/go-proto/rootlesscontainers.proto | 1 - 3 files changed, 6 insertions(+), 8 deletions(-) delete mode 100644 vendor/github.com/openSUSE/umoci/packaging delete mode 100644 vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto diff --git a/go.sum b/go.sum index 90295fe367..89f5013be6 100644 --- a/go.sum +++ b/go.sum @@ -33,7 +33,7 @@ github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.1 h1:dJbykiiSIS3Xvo8d+A6rSXcUEFGfvCjUA+bUED4qegQ= github.com/containernetworking/plugins v0.8.1/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU= -github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba h1:kUvFz4PWuB3DGy730A29+433HSykc53uvAKhWmoCPXU= +github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba h1:gSB68H7q2WphSW/Qv2b8/vquMvB6Hx0I7KkYO6pulsQ= github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= github.com/containers/storage v0.0.0-20180604200230-88d80428f9b1 h1:XxYkhkQbl5U+c930nkoJM46gy4xn/UbZlpZ/2kkAKWo= github.com/containers/storage v0.0.0-20180604200230-88d80428f9b1/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM= @@ -59,7 +59,7 @@ github.com/deislabs/oras v0.4.0 h1:G/wxhm9Simecxlr7dsjWy5iL0UGPAQlnuFNRZsXVJfI= github.com/deislabs/oras v0.4.0/go.mod h1:SXwPnImOu69FofPWaqgB+cPKKQRBmao5i+9xQRdcOiM= github.com/docker/distribution v0.0.0-20180611183926-749f6afb4572 
h1:i5cGFDf/s0ocGJXaJHJR+NiDgMb5oLoUNpGlRZqurlY= github.com/docker/distribution v0.0.0-20180611183926-749f6afb4572/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 h1:UIlbTLoIplW1xQDT+7yqI8ihfHxg73S5vyUsScDWTZc= +github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 h1:p8hSDXZgVhyh/C9bPlG8QMY64VeXtVfjmjIlzaQok5Q= github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.0 h1:5bhDRLn1roGiNjz8IezRngHxMfoeaXGyr0BeMHq4rD8= github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -160,7 +160,7 @@ github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTc github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openSUSE/umoci v0.4.4 h1:pTM5xAnCpYfWugNp8ZL1e83NiwDxFopqwU3RVSU6l9Y= +github.com/openSUSE/umoci v0.4.4 h1:CNwlje61gxLf8Yg8wvfFFURN2a9UtczO+6HQS9x+r+s= github.com/openSUSE/umoci v0.4.4/go.mod h1:WC0knmZfXsEOZyJUv/k3zDOCzjyaFTGMvooGTe4NzIw= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -190,7 +190,7 @@ github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be h1:MoyXp/VjXUwM0GyDcdwT7Ubea2gxOSHpPaFo3qV+Y2A= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
-github.com/rootless-containers/proto v0.1.0 h1:+VUNCatWCjNzgrheqcy9qN/VThibH8/cGHQG9UJy2ag= +github.com/rootless-containers/proto v0.1.0 h1:gS1JOMEtk1YDYHCzBAf/url+olMJbac7MTrgSeP6zh4= github.com/rootless-containers/proto v0.1.0/go.mod h1:vgkUFZbQd0gcE/K/ZwtE4MYjZPu0UNHLXIQxhyqAFh8= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -229,7 +229,7 @@ github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vbatts/go-mtree v0.4.3 h1:U57BeTKpgmNcMu7lRJHzx6GHsstjGT7A9+iqviOuvtQ= +github.com/vbatts/go-mtree v0.4.3 h1:IC2s9EpogK3QzU+VsfuEdM7POkwnW43XDGAWO2Rb1Bo= github.com/vbatts/go-mtree v0.4.3/go.mod h1:3sazBqLG4bZYmgRTgdh9X3iKTzwBpp5CrREJDzrNSXY= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6 h1:YqlaLDyh/+jUHgh83iNy8KiCvD4LeqnSS5U5a9cgYqU= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= @@ -282,7 +282,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.3.0+incompatible h1:zANm08BR3aLMhyvHi+AxtY5IhnqJOnWsNhtRkKkZhmM= +gotest.tools v2.3.0+incompatible h1:oTCKjb7ZuVfn37AQodk7UysjQQL/S5Dep3pzi59u1NQ= gotest.tools v2.3.0+incompatible/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/client-go v0.0.0-20181010045704-56e7a63b5e38/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= diff --git a/vendor/github.com/openSUSE/umoci/packaging b/vendor/github.com/openSUSE/umoci/packaging deleted file mode 100644 index dd2596cf07..0000000000 --- a/vendor/github.com/openSUSE/umoci/packaging +++ /dev/null @@ -1 +0,0 @@ -contrib/pkg \ No newline at end of file diff --git a/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto deleted file mode 100644 index 448e35d835..0000000000 --- a/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto +++ /dev/null @@ -1 +0,0 @@ -../rootlesscontainers.proto \ No newline at end of file From 6d62c3476840b043994a84a6e25d39e5a80062d6 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 12 Jul 2019 04:37:43 +0000 Subject: [PATCH 6/7] vendor from within container --- go.sum | 12 ++-- .../klauspost/cpuid/CONTRIBUTING.txt | 70 +++++++++---------- vendor/github.com/klauspost/crc32/.travis.yml | 26 +++---- vendor/github.com/openSUSE/umoci/packaging | 1 + .../proto/go-proto/rootlesscontainers.proto | 1 + 5 files changed, 56 insertions(+), 54 deletions(-) create mode 100644 vendor/github.com/openSUSE/umoci/packaging create mode 100644 vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto diff --git a/go.sum b/go.sum index 89f5013be6..90295fe367 100644 --- a/go.sum +++ b/go.sum @@ -33,7 +33,7 @@ github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.1 h1:dJbykiiSIS3Xvo8d+A6rSXcUEFGfvCjUA+bUED4qegQ= github.com/containernetworking/plugins v0.8.1/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU= -github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba 
h1:gSB68H7q2WphSW/Qv2b8/vquMvB6Hx0I7KkYO6pulsQ= +github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba h1:kUvFz4PWuB3DGy730A29+433HSykc53uvAKhWmoCPXU= github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= github.com/containers/storage v0.0.0-20180604200230-88d80428f9b1 h1:XxYkhkQbl5U+c930nkoJM46gy4xn/UbZlpZ/2kkAKWo= github.com/containers/storage v0.0.0-20180604200230-88d80428f9b1/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM= @@ -59,7 +59,7 @@ github.com/deislabs/oras v0.4.0 h1:G/wxhm9Simecxlr7dsjWy5iL0UGPAQlnuFNRZsXVJfI= github.com/deislabs/oras v0.4.0/go.mod h1:SXwPnImOu69FofPWaqgB+cPKKQRBmao5i+9xQRdcOiM= github.com/docker/distribution v0.0.0-20180611183926-749f6afb4572 h1:i5cGFDf/s0ocGJXaJHJR+NiDgMb5oLoUNpGlRZqurlY= github.com/docker/distribution v0.0.0-20180611183926-749f6afb4572/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 h1:p8hSDXZgVhyh/C9bPlG8QMY64VeXtVfjmjIlzaQok5Q= +github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 h1:UIlbTLoIplW1xQDT+7yqI8ihfHxg73S5vyUsScDWTZc= github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.0 h1:5bhDRLn1roGiNjz8IezRngHxMfoeaXGyr0BeMHq4rD8= github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -160,7 +160,7 @@ github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTc github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openSUSE/umoci v0.4.4 h1:CNwlje61gxLf8Yg8wvfFFURN2a9UtczO+6HQS9x+r+s= +github.com/openSUSE/umoci v0.4.4 h1:pTM5xAnCpYfWugNp8ZL1e83NiwDxFopqwU3RVSU6l9Y= 
github.com/openSUSE/umoci v0.4.4/go.mod h1:WC0knmZfXsEOZyJUv/k3zDOCzjyaFTGMvooGTe4NzIw= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -190,7 +190,7 @@ github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be h1:MoyXp/VjXUwM0GyDcdwT7Ubea2gxOSHpPaFo3qV+Y2A= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rootless-containers/proto v0.1.0 h1:gS1JOMEtk1YDYHCzBAf/url+olMJbac7MTrgSeP6zh4= +github.com/rootless-containers/proto v0.1.0 h1:+VUNCatWCjNzgrheqcy9qN/VThibH8/cGHQG9UJy2ag= github.com/rootless-containers/proto v0.1.0/go.mod h1:vgkUFZbQd0gcE/K/ZwtE4MYjZPu0UNHLXIQxhyqAFh8= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -229,7 +229,7 @@ github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vbatts/go-mtree v0.4.3 h1:IC2s9EpogK3QzU+VsfuEdM7POkwnW43XDGAWO2Rb1Bo= +github.com/vbatts/go-mtree v0.4.3 h1:U57BeTKpgmNcMu7lRJHzx6GHsstjGT7A9+iqviOuvtQ= github.com/vbatts/go-mtree v0.4.3/go.mod h1:3sazBqLG4bZYmgRTgdh9X3iKTzwBpp5CrREJDzrNSXY= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6 h1:YqlaLDyh/+jUHgh83iNy8KiCvD4LeqnSS5U5a9cgYqU= github.com/vishvananda/netlink 
v1.0.1-0.20190618143317-99a56c251ae6/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= @@ -282,7 +282,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.3.0+incompatible h1:oTCKjb7ZuVfn37AQodk7UysjQQL/S5Dep3pzi59u1NQ= +gotest.tools v2.3.0+incompatible h1:zANm08BR3aLMhyvHi+AxtY5IhnqJOnWsNhtRkKkZhmM= gotest.tools v2.3.0+incompatible/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/client-go v0.0.0-20181010045704-56e7a63b5e38/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt index 452d28eda8..2ef4714f71 100644 --- a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt +++ b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt @@ -1,35 +1,35 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2015- Klaus Post & Contributors. -Email: klauspost@gmail.com - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/klauspost/crc32/.travis.yml b/vendor/github.com/klauspost/crc32/.travis.yml index c50f5b7b0d..de64ae491f 100644 --- a/vendor/github.com/klauspost/crc32/.travis.yml +++ b/vendor/github.com/klauspost/crc32/.travis.yml @@ -1,13 +1,13 @@ -language: go - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip - -script: - - go test -v . - - go test -v -race . +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip + +script: + - go test -v . + - go test -v -race . 
diff --git a/vendor/github.com/openSUSE/umoci/packaging b/vendor/github.com/openSUSE/umoci/packaging new file mode 100644 index 0000000000..dd2596cf07 --- /dev/null +++ b/vendor/github.com/openSUSE/umoci/packaging @@ -0,0 +1 @@ +contrib/pkg \ No newline at end of file diff --git a/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto new file mode 100644 index 0000000000..448e35d835 --- /dev/null +++ b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto @@ -0,0 +1 @@ +../rootlesscontainers.proto \ No newline at end of file From 51f16a8a42dfd55e7fdc127b42fe6a76c1d1f3ac Mon Sep 17 00:00:00 2001 From: root Date: Fri, 12 Jul 2019 05:20:06 +0000 Subject: [PATCH 7/7] vendor using correct image tag --- go.sum | 12 ++++++------ vendor/github.com/openSUSE/umoci/packaging | 1 - .../proto/go-proto/rootlesscontainers.proto | 1 - 3 files changed, 6 insertions(+), 8 deletions(-) delete mode 100644 vendor/github.com/openSUSE/umoci/packaging delete mode 100644 vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto diff --git a/go.sum b/go.sum index 90295fe367..89f5013be6 100644 --- a/go.sum +++ b/go.sum @@ -33,7 +33,7 @@ github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.1 h1:dJbykiiSIS3Xvo8d+A6rSXcUEFGfvCjUA+bUED4qegQ= github.com/containernetworking/plugins v0.8.1/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU= -github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba h1:kUvFz4PWuB3DGy730A29+433HSykc53uvAKhWmoCPXU= +github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba h1:gSB68H7q2WphSW/Qv2b8/vquMvB6Hx0I7KkYO6pulsQ= github.com/containers/image v0.0.0-20180612162315-2e4f799f5eba/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= 
github.com/containers/storage v0.0.0-20180604200230-88d80428f9b1 h1:XxYkhkQbl5U+c930nkoJM46gy4xn/UbZlpZ/2kkAKWo= github.com/containers/storage v0.0.0-20180604200230-88d80428f9b1/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM= @@ -59,7 +59,7 @@ github.com/deislabs/oras v0.4.0 h1:G/wxhm9Simecxlr7dsjWy5iL0UGPAQlnuFNRZsXVJfI= github.com/deislabs/oras v0.4.0/go.mod h1:SXwPnImOu69FofPWaqgB+cPKKQRBmao5i+9xQRdcOiM= github.com/docker/distribution v0.0.0-20180611183926-749f6afb4572 h1:i5cGFDf/s0ocGJXaJHJR+NiDgMb5oLoUNpGlRZqurlY= github.com/docker/distribution v0.0.0-20180611183926-749f6afb4572/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 h1:UIlbTLoIplW1xQDT+7yqI8ihfHxg73S5vyUsScDWTZc= +github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 h1:p8hSDXZgVhyh/C9bPlG8QMY64VeXtVfjmjIlzaQok5Q= github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.0 h1:5bhDRLn1roGiNjz8IezRngHxMfoeaXGyr0BeMHq4rD8= github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -160,7 +160,7 @@ github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTc github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openSUSE/umoci v0.4.4 h1:pTM5xAnCpYfWugNp8ZL1e83NiwDxFopqwU3RVSU6l9Y= +github.com/openSUSE/umoci v0.4.4 h1:CNwlje61gxLf8Yg8wvfFFURN2a9UtczO+6HQS9x+r+s= github.com/openSUSE/umoci v0.4.4/go.mod h1:WC0knmZfXsEOZyJUv/k3zDOCzjyaFTGMvooGTe4NzIw= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ 
-190,7 +190,7 @@ github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be h1:MoyXp/VjXUwM0GyDcdwT7Ubea2gxOSHpPaFo3qV+Y2A= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rootless-containers/proto v0.1.0 h1:+VUNCatWCjNzgrheqcy9qN/VThibH8/cGHQG9UJy2ag= +github.com/rootless-containers/proto v0.1.0 h1:gS1JOMEtk1YDYHCzBAf/url+olMJbac7MTrgSeP6zh4= github.com/rootless-containers/proto v0.1.0/go.mod h1:vgkUFZbQd0gcE/K/ZwtE4MYjZPu0UNHLXIQxhyqAFh8= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -229,7 +229,7 @@ github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vbatts/go-mtree v0.4.3 h1:U57BeTKpgmNcMu7lRJHzx6GHsstjGT7A9+iqviOuvtQ= +github.com/vbatts/go-mtree v0.4.3 h1:IC2s9EpogK3QzU+VsfuEdM7POkwnW43XDGAWO2Rb1Bo= github.com/vbatts/go-mtree v0.4.3/go.mod h1:3sazBqLG4bZYmgRTgdh9X3iKTzwBpp5CrREJDzrNSXY= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6 h1:YqlaLDyh/+jUHgh83iNy8KiCvD4LeqnSS5U5a9cgYqU= github.com/vishvananda/netlink v1.0.1-0.20190618143317-99a56c251ae6/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= @@ -282,7 +282,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 
h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.3.0+incompatible h1:zANm08BR3aLMhyvHi+AxtY5IhnqJOnWsNhtRkKkZhmM= +gotest.tools v2.3.0+incompatible h1:oTCKjb7ZuVfn37AQodk7UysjQQL/S5Dep3pzi59u1NQ= gotest.tools v2.3.0+incompatible/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/client-go v0.0.0-20181010045704-56e7a63b5e38/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= diff --git a/vendor/github.com/openSUSE/umoci/packaging b/vendor/github.com/openSUSE/umoci/packaging deleted file mode 100644 index dd2596cf07..0000000000 --- a/vendor/github.com/openSUSE/umoci/packaging +++ /dev/null @@ -1 +0,0 @@ -contrib/pkg \ No newline at end of file diff --git a/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto b/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto deleted file mode 100644 index 448e35d835..0000000000 --- a/vendor/github.com/rootless-containers/proto/go-proto/rootlesscontainers.proto +++ /dev/null @@ -1 +0,0 @@ -../rootlesscontainers.proto \ No newline at end of file